This commit introduces support for Ollama as an alternative Large Language Model (LLM) provider and enhances PDF image extraction capabilities.
- **Ollama Integration:**
  - Implemented `set_ollama_config` to configure Ollama's base URL from `config.ini`.
  - Modified `llm.py` to dynamically select and configure the LLM (Gemini or Ollama) based on the `PROVIDER` setting.
  - Updated `get_model_name` to return provider-specific default model names.
  - `pdf_convertor.py` now conditionally initializes `ChatGoogleGenerativeAI` or `ChatOllama` based on the configured provider (see the first sketch after this list).
- **PyMuPDF Image Extraction:**
  - Added a new `extract_images_from_pdf` function that uses PyMuPDF (`fitz`) to extract images directly from PDF files (see the second sketch after this list).
  - Introduced `get_extract_images_from_pdf_flag` to control this feature via `config.ini`.
  - Updated `convert_pdf_to_markdown` and `refine_content` to use the new extraction method when the flag is enabled.
- **Refinement Flow:**
  - Reordered the `save_md_images` call in `main.py` and added an option to save the refined markdown under a specific filename (`index_refined.md`).
- **Dependencies:**
  - Updated `pyproject.lock` with the new dependencies for Ollama integration (`langchain-ollama`) and PyMuPDF (`PyMuPDF`), plus platform-specific markers for the NVIDIA dependencies.
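
A minimal sketch of the provider selection described above. It assumes the helper functions from `llm.py` (shown further down) and a hypothetical `build_llm` wrapper; the actual wiring in `pdf_convertor.py` may differ:

```python
# Illustrative sketch only -- build_llm is a hypothetical wrapper, and the
# real pdf_convertor.py may structure this differently.
import configparser
import os

from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_ollama import ChatOllama

from llm import get_model_name, get_temperature, set_api_key


def build_llm():
    """Initialize the chat model for the provider configured in config.ini."""
    config = configparser.ConfigParser()
    config.read("config.ini")
    provider = config.get("llm", "PROVIDER", fallback="gemini")

    # Exports GOOGLE_API_KEY or OLLAMA_BASE_URL as needed.
    set_api_key(provider)

    if provider == "ollama":
        return ChatOllama(
            model=get_model_name(),
            base_url=os.environ["OLLAMA_BASE_URL"],
            temperature=get_temperature(),
        )
    return ChatGoogleGenerativeAI(model=get_model_name(), temperature=get_temperature())
```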
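
And a sketch of direct image extraction with PyMuPDF. The function name comes from the summary above; the signature and output layout are assumptions:

```python
# Illustrative sketch of PyMuPDF-based extraction; the real
# extract_images_from_pdf may differ in signature and output layout.
import os

import fitz  # PyMuPDF


def extract_images_from_pdf(pdf_path: str, output_dir: str) -> list[str]:
    """Extract every embedded image from a PDF and save it under output_dir."""
    os.makedirs(output_dir, exist_ok=True)
    saved = []
    doc = fitz.open(pdf_path)
    for page_index, page in enumerate(doc):
        for image_index, img in enumerate(page.get_images(full=True)):
            xref = img[0]  # cross-reference number of the image object
            info = doc.extract_image(xref)
            filename = os.path.join(
                output_dir, f"page{page_index + 1}_img{image_index + 1}.{info['ext']}"
            )
            with open(filename, "wb") as fh:
                fh.write(info["image"])
            saved.append(filename)
    doc.close()
    return saved
```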
The updated `llm.py`:
```python
import configparser
import os


def set_api_key(provider: str) -> None:
    """Configure credentials/connection settings for the selected provider."""
    if provider == "gemini":
        set_gemini_api_key()
    elif provider == "ollama":
        set_ollama_config()


def set_gemini_api_key() -> None:
    """Export GOOGLE_API_KEY from config.ini unless it is already set."""
    config = configparser.ConfigParser()
    config.read("config.ini")
    google_api_key = config.get("llm", "GOOGLE_API_KEY", fallback=None)

    if not os.environ.get("GOOGLE_API_KEY"):
        if google_api_key:
            os.environ["GOOGLE_API_KEY"] = google_api_key
        else:
            raise ValueError(
                "Error: GOOGLE_API_KEY not found in config.ini or environment variables"
            )


def set_ollama_config() -> None:
    """Export OLLAMA_BASE_URL from config.ini unless it is already set."""
    config = configparser.ConfigParser()
    config.read("config.ini")
    ollama_base_url = config.get(
        "llm", "OLLAMA_BASE_URL", fallback="http://localhost:11434"
    )

    if not os.environ.get("OLLAMA_BASE_URL"):
        os.environ["OLLAMA_BASE_URL"] = ollama_base_url


def get_model_name() -> str:
    """Return the configured model name, with a provider-specific default."""
    config = configparser.ConfigParser()
    config.read("config.ini")
    provider = config.get("llm", "PROVIDER", fallback="gemini")
    if provider == "gemini":
        return config.get("llm", "MODEL_NAME", fallback="gemini-2.5-flash")
    elif provider == "ollama":
        return config.get("llm", "MODEL_NAME", fallback="gemma3:latest")
    return "gemini-2.5-flash"  # Default fallback for unknown providers


def get_temperature() -> float:
    """Return the configured sampling temperature (default 0.7)."""
    config = configparser.ConfigParser()
    config.read("config.ini")
    return float(config.get("llm", "TEMPERATURE", fallback=0.7))
```
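
For completeness, the `get_extract_images_from_pdf_flag` helper mentioned in the summary would plausibly follow the same `configparser` pattern as the functions above; the section and option names below are assumptions:

```python
# Hedged sketch of the flag helper named in the summary; the actual
# config.ini section/option names are assumptions.
import configparser


def get_extract_images_from_pdf_flag() -> bool:
    """Return True if PyMuPDF image extraction is enabled in config.ini."""
    config = configparser.ConfigParser()
    config.read("config.ini")
    return config.getboolean("pdf", "EXTRACT_IMAGES_FROM_PDF", fallback=False)
```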