add llm ai
This commit is contained in:
@@ -32,6 +32,10 @@ def load_config():
|
||||
|
||||
# 合并配置,local_config优先级更高
|
||||
config = OmegaConf.merge(base_config, local_config)
|
||||
|
||||
# 把paths下的所有值pathlib化
|
||||
for key, value in config.paths.items():
|
||||
config.paths[key] = Path(value)
|
||||
|
||||
return config
|
||||
|
||||
|
||||
8
src/utils/io.py
Normal file
8
src/utils/io.py
Normal file
@@ -0,0 +1,8 @@
|
||||
from pathlib import Path
|
||||
|
||||
def read_txt(path: Path) -> str:
    """
    Read a UTF-8 encoded text file (handles Chinese content).

    Args:
        path: Path to the text file.

    Returns:
        The full file contents as a single string.
    """
    return path.read_text(encoding="utf-8")
|
||||
@@ -1,7 +1,10 @@
|
||||
from litellm import completion
|
||||
from langchain.prompts import PromptTemplate
|
||||
from pathlib import Path
|
||||
import json
|
||||
|
||||
from src.utils.config import CONFIG
|
||||
from src.utils.io import read_txt
|
||||
|
||||
def get_prompt(template: str, infos: dict) -> str:
|
||||
"""
|
||||
@@ -31,4 +34,23 @@ def call_llm(prompt: str) -> str:
|
||||
)
|
||||
|
||||
# 返回生成的内容
|
||||
return response.choices[0].message.content
|
||||
return response.choices[0].message.content
|
||||
|
||||
def get_prompt_and_call_llm(template_path: Path, infos: dict) -> dict:
    """
    Load a prompt template, fill it with infos, call the LLM, and parse
    the response as JSON.

    Args:
        template_path: Path to the prompt template text file.
        infos: Values substituted into the template placeholders.

    Returns:
        The LLM response parsed as JSON (a dict for object responses).

    Raises:
        json.JSONDecodeError: If the LLM response is not valid JSON.
    """
    template = read_txt(template_path)
    prompt = get_prompt(template, infos)
    res = call_llm(prompt)
    # Log BEFORE parsing so the raw response is visible even when
    # json.loads raises on a malformed LLM reply.
    print(f"prompt = {prompt}")
    print(f"res = {res}")
    return json.loads(res)
|
||||
|
||||
def get_ai_prompt_and_call_llm(infos: dict) -> dict:
    """
    Build the prompt from the "ai.txt" template and query the LLM.

    Args:
        infos: Values substituted into the template placeholders.

    Returns:
        The LLM response parsed as JSON.
    """
    return get_prompt_and_call_llm(CONFIG.paths.templates / "ai.txt", infos)
|
||||
Reference in New Issue
Block a user