refactor: 统一使用 OpenAI 兼容 API,支持自定义 base_url/key/model
- 移除 Gemini 和 Ollama 独立适配,统一使用 ChatOpenAI + base_url
- config.ini 简化为 BASE_URL / API_KEY / MODEL / TEMPERATURE / MAX_RETRIES
- 新增 config.example.ini 示例配置
- 移除 langchain-google-genai / langchain-ollama / pymupdf 依赖
- main.py 新增断点续跑:跳过已有 index.md / index_refined.md
- LLM 请求支持 max_retries 自动重试(默认 3 次)
- 优化 README
This commit is contained in:
@@ -5,70 +5,36 @@ import os
|
||||
def set_api_key() -> None:
    """Configure credentials for the LLM provider named in config.ini.

    Reads PROVIDER from the [llm] section (default "gemini") and delegates
    to the matching provider-specific setup function. Unknown provider
    names are silently ignored, matching the original if/elif chain.

    NOTE(review): this dispatcher belongs to the pre-refactor multi-provider
    design; the commit this diff comes from removes it in favor of a single
    OpenAI-compatible path.
    """
    config = configparser.ConfigParser()
    config.read("config.ini")
    provider = config.get("llm", "PROVIDER", fallback="gemini")
    if provider == "gemini":
        set_gemini_api_key()
    elif provider == "ollama":
        set_ollama_config()
    elif provider == "openai":
        set_openai_api_key()
|
||||
|
||||
def set_openai_api_key() -> None:
    """Ensure OPENAI_API_KEY is set in the process environment.

    Reads API_KEY from the [llm] section of config.ini and exports it as
    OPENAI_API_KEY. An already-set (non-empty) environment variable always
    wins over the config file.

    Raises:
        ValueError: if no key is found in either the environment or
            config.ini.
    """
    config = configparser.ConfigParser()
    config.read("config.ini")
    api_key = config.get("llm", "API_KEY", fallback=None)
    if not os.environ.get("OPENAI_API_KEY"):
        if api_key:
            os.environ["OPENAI_API_KEY"] = api_key
        else:
            raise ValueError(
                "Error: API_KEY not found in config.ini or environment variables"
            )
|
||||
|
||||
|
||||
def set_gemini_api_key() -> None:
|
||||
def get_base_url() -> str | None:
|
||||
config = configparser.ConfigParser()
|
||||
config.read("config.ini")
|
||||
google_api_key = config.get("llm", "GOOGLE_API_KEY", fallback=None)
|
||||
|
||||
if not os.environ.get("GOOGLE_API_KEY"):
|
||||
if google_api_key:
|
||||
os.environ["GOOGLE_API_KEY"] = google_api_key
|
||||
else:
|
||||
raise ValueError(
|
||||
"Error: GOOGLE_API_KEY not found in config.ini or environment variables"
|
||||
)
|
||||
return
|
||||
|
||||
|
||||
def set_ollama_config() -> None:
|
||||
config = configparser.ConfigParser()
|
||||
config.read("config.ini")
|
||||
ollama_base_url = config.get(
|
||||
"llm", "OLLAMA_BASE_URL", fallback="http://localhost:11434"
|
||||
)
|
||||
|
||||
if not os.environ.get("OLLAMA_BASE_URL"):
|
||||
os.environ["OLLAMA_BASE_URL"] = ollama_base_url
|
||||
return
|
||||
base_url = config.get("llm", "BASE_URL", fallback=None)
|
||||
return base_url or os.environ.get("OPENAI_BASE_URL")
|
||||
|
||||
|
||||
def get_model_name() -> str:
    """Return the chat model name from config.ini.

    Reads MODEL from the [llm] section; defaults to "gpt-4o" when the key
    (or the whole file/section) is missing.
    """
    config = configparser.ConfigParser()
    config.read("config.ini")
    return config.get("llm", "MODEL", fallback="gpt-4o")
|
||||
|
||||
|
||||
def get_temperature() -> float:
    """Return the LLM sampling temperature from config.ini.

    Reads TEMPERATURE from the [llm] section and coerces it to float;
    defaults to 0.7 when the key is missing.
    """
    config = configparser.ConfigParser()
    config.read("config.ini")
    return float(config.get("llm", "TEMPERATURE", fallback=0.7))
|
||||
|
||||
|
||||
def get_max_retries() -> int:
    """Return the maximum number of automatic LLM request retries.

    Reads MAX_RETRIES from the [llm] section of config.ini and coerces it
    to int; defaults to 3 when the key is missing.
    """
    config = configparser.ConfigParser()
    config.read("config.ini")
    return int(config.get("llm", "MAX_RETRIES", fallback=3))
|
||||
|
||||
Reference in New Issue
Block a user