feat(llm): Add Ollama provider and PyMuPDF image extraction
This commit introduces support for Ollama as an alternative Large Language Model (LLM) provider and enhances PDF image extraction capabilities.
- **Ollama Integration:**
- Implemented `set_ollama_config` to configure Ollama's base URL from `config.ini`.
- Modified `llm.py` to dynamically select and configure the LLM (Gemini or Ollama) based on the `PROVIDER` setting.
- Updated `get_model_name` to return provider-specific default model names.
- `pdf_convertor.py` now conditionally initializes `ChatGoogleGenerativeAI` or `ChatOllama` based on the configured provider.
- **PyMuPDF Image Extraction:**
- Added a new `extract_images_from_pdf` function using PyMuPDF (`fitz`) for direct image extraction from PDF files.
- Introduced `get_extract_images_from_pdf_flag` to control this feature via `config.ini`.
- `convert_pdf_to_markdown` and `refine_content` functions were updated to utilize this new image extraction method when enabled.
- **Refinement Flow:**
- Adjusted the order of `save_md_images` in `main.py` and added an option to save the refined markdown with a specific filename (`index_refined.md`).
- **Dependencies:**
- Updated `pyproject.lock` to include new dependencies for Ollama integration (`langchain-ollama`) and PyMuPDF (`PyMuPDF`), along with platform-specific markers for NVIDIA dependencies.
This commit is contained in:
Changed files: pdf_convertor.py (132 lines changed)
@@ -1,5 +1,6 @@
|
||||
import re
|
||||
import base64
|
||||
import os
|
||||
from docling.datamodel.accelerator_options import AcceleratorDevice, AcceleratorOptions
|
||||
from docling.datamodel.base_models import InputFormat
|
||||
from docling.datamodel.pipeline_options import (
|
||||
@@ -9,11 +10,14 @@ from docling_core.types.io import DocumentStream
|
||||
from docling.datamodel.settings import settings
|
||||
from docling.document_converter import DocumentConverter, PdfFormatOption
|
||||
from docling_core.types.doc.base import ImageRefMode
|
||||
from langchain_core.messages import HumanMessage, SystemMessage
|
||||
from langchain_core.messages import HumanMessage
|
||||
from langchain_google_genai import ChatGoogleGenerativeAI
|
||||
from llm import set_gemini_api_key, get_model_name
|
||||
from langchain_ollama import ChatOllama
|
||||
from llm import set_api_key, get_model_name, get_temperature
|
||||
from io import BytesIO
|
||||
from pathlib import Path
|
||||
import configparser
|
||||
import fitz
|
||||
|
||||
|
||||
def save_md_images(
|
||||
@@ -44,7 +48,7 @@ def load_md_file(md_path: str | Path) -> tuple[str, dict[str, bytes]]:
|
||||
md_path = Path(md_path)
|
||||
with open(md_path, "r") as md_file:
|
||||
md = md_file.read()
|
||||
images: list[str] = re.findall(r"!\[Image\]\((.*?)\)", md)
|
||||
images: list[str] = re.findall(r"!\[.*?\]\((.*?)\)", md)
|
||||
image_dict: dict[str, bytes] = dict()
|
||||
for i in range(len(images)):
|
||||
image_path = images[i]
|
||||
@@ -112,26 +116,54 @@ def convert_pdf_to_markdown(pdf: bytes) -> tuple[str, dict[str, bytes]]:
|
||||
def refine_content(md: str, images: dict[str, bytes], pdf: bytes) -> str:
|
||||
"""Refines the Markdown content using an LLM."""
|
||||
|
||||
set_gemini_api_key()
|
||||
config = configparser.ConfigParser()
|
||||
config.read("config.ini")
|
||||
provider = config.get("llm", "PROVIDER", fallback="gemini")
|
||||
|
||||
set_api_key(provider)
|
||||
|
||||
try:
|
||||
llm = ChatGoogleGenerativeAI(model=get_model_name(), temperature=0.7)
|
||||
if provider == "gemini":
|
||||
llm = ChatGoogleGenerativeAI(
|
||||
model=get_model_name(), temperature=get_temperature()
|
||||
)
|
||||
elif provider == "ollama":
|
||||
llm = ChatOllama(
|
||||
model=get_model_name(),
|
||||
temperature=get_temperature(),
|
||||
base_url=os.environ["OLLAMA_BASE_URL"],
|
||||
num_ctx=256000,
|
||||
num_predict=-1,
|
||||
)
|
||||
else:
|
||||
raise ValueError(f"Unsupported LLM provider: {provider}")
|
||||
except Exception as e:
|
||||
raise BaseException(
|
||||
f"Error initializing LLM. Make sure your Google API key is set correctly. Error: {e}"
|
||||
f"Error initializing LLM. Make sure your LLM configuration is correct. Error: {e}"
|
||||
)
|
||||
|
||||
with open("pdf_convertor_prompt.md", "r") as f:
|
||||
prompt = f.read()
|
||||
|
||||
human_message_parts = [
|
||||
{
|
||||
"type": "media",
|
||||
"mime_type": "text/markdown",
|
||||
"data": base64.b64encode(md.encode("UTF-8")).decode("utf-8"),
|
||||
},
|
||||
]
|
||||
# 添加 Markdown
|
||||
human_message_parts = [{"type": "text", "text": prompt}]
|
||||
if provider == "gemini":
|
||||
human_message_parts.append(
|
||||
{
|
||||
"type": "media",
|
||||
"mime_type": "text/markdown",
|
||||
"data": base64.b64encode(md.encode("UTF-8")).decode("utf-8"),
|
||||
}
|
||||
)
|
||||
elif provider == "ollama":
|
||||
human_message_parts.append(
|
||||
{
|
||||
"type": "text",
|
||||
"text": md,
|
||||
}
|
||||
)
|
||||
|
||||
# 添加图片
|
||||
for image_name in images.keys():
|
||||
human_message_parts.append(
|
||||
{
|
||||
@@ -139,35 +171,63 @@ def refine_content(md: str, images: dict[str, bytes], pdf: bytes) -> str:
|
||||
"text": f"This is image: '{image_name}':\n",
|
||||
}
|
||||
)
|
||||
human_message_parts.append(
|
||||
{
|
||||
"type": "media",
|
||||
"mime_type": "image/png",
|
||||
"data": base64.b64encode(images[image_name]).decode("utf-8"),
|
||||
}
|
||||
)
|
||||
if provider == "gemini":
|
||||
human_message_parts.append(
|
||||
{
|
||||
"type": "media",
|
||||
"mime_type": "image/png",
|
||||
"data": base64.b64encode(images[image_name]).decode("utf-8"),
|
||||
}
|
||||
)
|
||||
if provider == "ollama":
|
||||
human_message_parts.append(
|
||||
{
|
||||
"type": "image_url",
|
||||
"image_url": f"data:image/png;base64,{base64.b64encode(images[image_name]).decode('utf-8')}",
|
||||
}
|
||||
)
|
||||
|
||||
human_message_parts.extend(
|
||||
[
|
||||
{
|
||||
"type": "text",
|
||||
"text": "This is original PDF file:\n",
|
||||
},
|
||||
{
|
||||
"type": "media",
|
||||
"mime_type": "application/pdf",
|
||||
"data": base64.b64encode(pdf).decode("utf-8"),
|
||||
},
|
||||
]
|
||||
)
|
||||
# 添加 PDF
|
||||
if provider == "gemini":
|
||||
human_message_parts.extend(
|
||||
[
|
||||
{
|
||||
"type": "text",
|
||||
"text": "This is original PDF file:\n",
|
||||
},
|
||||
{
|
||||
"type": "media",
|
||||
"mime_type": "application/pdf",
|
||||
"data": base64.b64encode(pdf).decode("utf-8"),
|
||||
},
|
||||
]
|
||||
)
|
||||
if provider == "ollama":
|
||||
doc = fitz.open(stream=pdf, filetype="pdf")
|
||||
for page_num in range(doc.page_count):
|
||||
page = doc.load_page(page_num)
|
||||
pix = page.get_pixmap()
|
||||
img_bytes = pix.tobytes("png")
|
||||
human_message_parts.append(
|
||||
{
|
||||
"type": "text",
|
||||
"text": f"This is page {page_num + 1} of the original PDF file:\n",
|
||||
}
|
||||
)
|
||||
human_message_parts.append(
|
||||
{
|
||||
"type": "image_url",
|
||||
"image_url": f"data:image/png;base64,{base64.b64encode(img_bytes).decode('utf-8')}",
|
||||
}
|
||||
)
|
||||
doc.close()
|
||||
|
||||
message_content = [
|
||||
SystemMessage(content=prompt),
|
||||
HumanMessage(content=human_message_parts), # type: ignore
|
||||
]
|
||||
|
||||
print(
|
||||
"Sending request to Gemini with the PDF, Markdown and referenced images... This may take a moment."
|
||||
f"Sending request to {provider} with the PDF, Markdown and referenced images... This may take a moment."
|
||||
)
|
||||
try:
|
||||
response = llm.invoke(message_content)
|
||||
@@ -176,7 +236,7 @@ def refine_content(md: str, images: dict[str, bytes], pdf: bytes) -> str:
|
||||
raise BaseException(f"An error occurred while invoking the LLM: {e}")
|
||||
|
||||
if str(refined_content) == "":
|
||||
raise BaseException("Response of Gemini is empty")
|
||||
raise BaseException(f"Response of {provider} is empty")
|
||||
|
||||
return fix_output(str(refined_content))
|
||||
|
||||
|
||||
Reference in New Issue
Block a user