From d55ada7d66eddc1eb5206d641aeb7eaad51b2e21 Mon Sep 17 00:00:00 2001
From: bridge
Date: Tue, 30 Dec 2025 22:20:30 +0800
Subject: [PATCH] refactor llm config

---
 tools/slice_images.py => slice_images.py |   0
 src/server/main.py                       | 123 +++++++++++++++++++----
 src/utils/config.py                      |   9 +-
 src/utils/llm/client.py                  |  25 ++++-
 src/utils/llm/config.py                  |  56 ++++++-----
 static/config.yml                        |   8 --
 web/src/App.vue                          |  13 ++-
 web/src/api/http.ts                      |  20 +++-
 web/src/components/SystemMenu.vue        |  19 ++--
 web/src/stores/socket.ts                 |  31 ++++++
 10 files changed, 235 insertions(+), 69 deletions(-)
 rename tools/slice_images.py => slice_images.py (100%)

diff --git a/tools/slice_images.py b/slice_images.py
similarity index 100%
rename from tools/slice_images.py
rename to slice_images.py
diff --git a/src/server/main.py b/src/server/main.py
index 0db5cf5..b3cc077 100644
--- a/src/server/main.py
+++ b/src/server/main.py
@@ -210,8 +210,57 @@ def serialize_phenomenon(phenomenon) -> Optional[dict]:
         "effect_desc": effect_desc
     }
 
+def check_llm_connectivity() -> tuple[bool, str]:
+    """
+    Check LLM connectivity.
+
+    Returns:
+        (success, error message)
+    """
+    try:
+        from src.utils.llm.config import LLMMode, LLMConfig
+
+        normal_config = LLMConfig.from_mode(LLMMode.NORMAL)
+        fast_config = LLMConfig.from_mode(LLMMode.FAST)
+
+        # Make sure the configuration is complete
+        if not normal_config.api_key or not normal_config.base_url:
+            return False, "LLM config incomplete: please fill in the API key and base URL"
+
+        if not normal_config.model_name:
+            return False, "LLM config incomplete: please fill in the smart model name"
+
+        # Decide whether both models need to be tested
+        same_model = (normal_config.model_name == fast_config.model_name and
+                      normal_config.base_url == fast_config.base_url and
+                      normal_config.api_key == fast_config.api_key)
+
+        if same_model:
+            # Test only once
+            print(f"Checking LLM connectivity (single model): {normal_config.model_name}")
+            success, error = test_connectivity(LLMMode.NORMAL, normal_config)
+            if not success:
+                return False, f"Connection failed: {error}"
+        else:
+            # Test both models
+            print(f"Checking smart model connectivity: {normal_config.model_name}")
+            success, error = test_connectivity(LLMMode.NORMAL, normal_config)
+            if not success:
+                return False, f"Smart model connection failed: {error}"
+
+            print(f"Checking fast model connectivity: {fast_config.model_name}")
+            success, error = test_connectivity(LLMMode.FAST, fast_config)
+            if not success:
+                return False, f"Fast model connection failed: {error}"
+
+        return True, ""
+
+    except Exception as e:
+        return False, f"Connectivity check error: {str(e)}"
+
 def init_game():
     """Initialize the game world; logic reused from src/run/run.py"""
+    print("Initializing the game world...")
     game_map = load_cultivation_world_map()
     world = World(map=game_map, month_stamp=create_month_stamp(Year(100), Month.JANUARY))
@@ -267,6 +316,24 @@ def init_game():
     game_instance["world"] = world
     game_instance["sim"] = sim
     print("Game world initialized!")
+
+    # ===== LLM connectivity check (before the simulator runs) =====
+    print("Checking LLM connectivity...")
+    success, error_msg = check_llm_connectivity()
+
+    if not success:
+        print(f"[WARN] LLM connectivity check failed: {error_msg}")
+        print("[WARN] Simulator paused; waiting for LLM configuration...")
+        game_instance["llm_check_failed"] = True
+        game_instance["llm_error_message"] = error_msg
+        game_instance["is_paused"] = True
+        print("Waiting for the frontend to connect and configure the LLM...")
+    else:
+        print("LLM connectivity check passed ✓")
+        game_instance["llm_check_failed"] = False
+        game_instance["llm_error_message"] = ""
+        game_instance["is_paused"] = False
+    # ===== end of LLM check =====
 
 async def game_loop():
     """Background loop that advances the game automatically"""
@@ -455,6 +522,17 @@ print(f"Web dist path: {WEB_DIST_PATH}")
 
 @app.websocket("/ws")
 async def websocket_endpoint(websocket: WebSocket):
     await manager.connect(websocket)
+
+    # ===== Check LLM status and notify the frontend =====
+    if game_instance.get("llm_check_failed", False):
+        error_msg = game_instance.get("llm_error_message", "LLM connection failed")
+        await websocket.send_json({
+            "type": "llm_config_required",
+            "error": error_msg
+        })
+        print(f"Sent the LLM configuration request to the client: {error_msg}")
+    # ===== end of check =====
+
     try:
         while True:
             # Keep the connection alive and receive client commands (complex commands are not handled yet)
@@ -1024,17 +1102,22 @@ def test_llm_connection(req: TestConnectionRequest):
             model_name=req.model_name
         )
 
-        success = test_connectivity(config=config)
+        success, error_msg = test_connectivity(config=config)
 
         if success:
             return {"status": "ok", "message": "Connection succeeded"}
         else:
-            raise HTTPException(status_code=400, detail="Connection failed")
+            # Return a 400 carrying the detailed error message
+            raise HTTPException(status_code=400, detail=error_msg)
 
+    except HTTPException:
+        # Re-raise HTTPException unchanged
+        raise
     except Exception as e:
+        # Any other unexpected error
         raise HTTPException(status_code=500, detail=f"Test error: {str(e)}")
 
 
 @app.post("/api/config/llm/save")
-def save_llm_config(req: LLMConfigDTO):
+async def save_llm_config(req: LLMConfigDTO):
     """Save the LLM configuration"""
     try:
         # 1. Update In-Memory Config (Partial update)
@@ -1075,6 +1158,24 @@ def save_llm_config(req: LLMConfigDTO):
 
         OmegaConf.save(conf, local_config_path)
 
+        # ===== If the LLM check had failed earlier, resume the simulator now =====
+        if game_instance.get("llm_check_failed", False):
+            print("Earlier LLM connection failure detected; resuming the simulator...")
+
+            # Clear the failure flags and resume
+            game_instance["llm_check_failed"] = False
+            game_instance["llm_error_message"] = ""
+            game_instance["is_paused"] = False
+
+            print("Simulator resumed ✓")
+
+            # Tell all connected clients to refresh
+            await manager.broadcast({
+                "type": "game_reinitialized",
+                "message": "LLM configured successfully; the game has resumed"
+            })
+        # ===== end of resume =====
+
         return {"status": "ok", "message": "Configuration saved"}
     except Exception as e:
         import traceback
@@ -1113,22 +1214,6 @@ def api_save_game(req: SaveGameRequest):
     if not world or not sim:
         raise HTTPException(status_code=503, detail="Game not initialized")
 
-    # existed_sects here has to come from world or sim; keeping it simple for now.
-    # We could walk the sect headquarters on the map, or better, use a global if one is kept.
-    # Since init_game runs only once, we would have to infer the active sects from world,
-    # though existed_sects in the save_game signature mainly just records ids.
-    # world.map.regions does contain the sect headquarters info.
-    # Or simpler: take everything from sects_by_id? Not quite right.
-    # Looking at the save_game implementation: it mostly stores ids.
-    # We could pass an empty list, if load can tolerate that.
-    # But load_game does: existed_sects = [sects_by_id[sid] for sid in existed_sect_ids],
-    # so whatever is not passed on save cannot be recovered on load.
-    # Stopgap: iterate all sects and count one as existing if it has territory or members.
-    # Or cruder: if CONFIG.game.sect_num is unchanged, it may not matter.
-    # Ideally the world object itself would carry existed_sects.
-    # Interim: pass every sect as existed_sects (the full set); wasteful but loses no data.
-    # Better plan: change init_game to attach existed_sects to world.
-    # Try to read it from a world attribute (in case one is added later)
     existed_sects = getattr(world, "existed_sects", [])
     if not existed_sects:
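Note: the gate above only sets game_instance["is_paused"]; pausing takes effect only if game_loop re-checks that flag on every tick. The loop body is not part of this patch, so the following is a minimal Python sketch of the expected contract, not the actual implementation (sleep intervals are illustrative):

    import asyncio

    game_instance = {"is_paused": True}  # module-level dict, as in main.py

    async def game_loop():
        """Idle while paused; otherwise advance the simulation one step."""
        while True:
            if game_instance.get("is_paused", False):
                # Wait for /api/config/llm/save to clear the flag
                await asyncio.sleep(0.5)
                continue
            # ... advance the simulation one tick here ...
            await asyncio.sleep(0.1)
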
diff --git a/src/utils/config.py b/src/utils/config.py
index c8a7641..cd8d7ce 100644
--- a/src/utils/config.py
+++ b/src/utils/config.py
@@ -1,9 +1,7 @@
 """
 Configuration management module
 Reads config.yml and local_config.yml with OmegaConf
-local_config.yml takes precedence
 """
-
 from pathlib import Path
 
 from omegaconf import OmegaConf
@@ -34,10 +32,11 @@ def load_config():
     config = OmegaConf.merge(base_config, local_config)
 
     # Convert every value under paths to a pathlib Path
-    for key, value in config.paths.items():
-        config.paths[key] = Path(value)
+    if hasattr(config, "paths"):
+        for key, value in config.paths.items():
+            config.paths[key] = Path(value)
 
     return config
 
 # Exported configuration object
-CONFIG = load_config()
\ No newline at end of file
+CONFIG = load_config()
diff --git a/src/utils/llm/client.py b/src/utils/llm/client.py
index d818a73..1d7944d 100644
--- a/src/utils/llm/client.py
+++ b/src/utils/llm/client.py
@@ -173,7 +173,7 @@ async def call_llm_with_task_name(
     return await call_llm_with_template(template_path, infos, mode, max_retries)
 
 
-def test_connectivity(mode: LLMMode = LLMMode.NORMAL, config: Optional[LLMConfig] = None) -> bool:
+def test_connectivity(mode: LLMMode = LLMMode.NORMAL, config: Optional[LLMConfig] = None) -> tuple[bool, str]:
     """
     Test LLM service connectivity (synchronous version)
 
@@ -182,7 +182,7 @@ def test_connectivity(mode: LLMMode = LLMMode.NORMAL, config: Optional[LLMConfig
         config: test with this configuration directly
 
     Returns:
-        bool: True if the connection succeeds, False otherwise
+        tuple[bool, str]: (success, error message); the error message is empty on success
     """
     try:
         if config is None:
@@ -199,7 +199,22 @@ def test_connectivity(mode: LLMMode = LLMMode.NORMAL, config: Optional[LLMConfig
         else:
             # Call the requests-based implementation directly
             _call_with_requests(config, "test")
-        return True
+        return True, ""
     except Exception as e:
-        print(f"Connectivity test failed: {e}")
-        return False
+        error_msg = str(e)
+        print(f"Connectivity test failed: {error_msg}")
+
+        # Map common errors to friendlier hints
+        if "401" in error_msg or "invalid_api_key" in error_msg or "Incorrect API key" in error_msg:
+            return False, "Invalid API key; please check that your key is correct"
+        elif "403" in error_msg or "Forbidden" in error_msg:
+            return False, "Access denied; please check your permissions or quota"
+        elif "404" in error_msg:
+            return False, "Service address not found; please check that the base URL is correct"
+        elif "timeout" in error_msg.lower():
+            return False, "Connection timed out; please check your network or the service address"
+        elif "Connection" in error_msg or "connect" in error_msg.lower():
+            return False, "Cannot reach the server; please check the base URL and network"
+        else:
+            # Fall back to the raw error message
+            return False, error_msg
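Note: since test_connectivity now returns a tuple, any caller still written as `if test_connectivity(...):` will always take the success branch, because `(False, "msg")` is a truthy tuple. Callers must unpack, as the updated endpoints above do; a minimal usage sketch:

    from src.utils.llm.client import test_connectivity
    from src.utils.llm.config import LLMMode

    ok, err = test_connectivity(LLMMode.NORMAL)
    if not ok:
        print(f"LLM unreachable: {err}")
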
diff --git a/src/utils/llm/config.py b/src/utils/llm/config.py
index b1f669c..ff96d88 100644
--- a/src/utils/llm/config.py
+++ b/src/utils/llm/config.py
@@ -2,13 +2,13 @@
 from enum import Enum
 from dataclasses import dataclass
 
-import os
-
+from src.utils.config import CONFIG
 
 class LLMMode(str, Enum):
     """LLM invocation mode"""
     NORMAL = "normal"
     FAST = "fast"
+    DEFAULT = "default"
 
 
 @dataclass(frozen=True)
@@ -21,7 +21,7 @@ class LLMConfig:
     @classmethod
     def from_mode(cls, mode: LLMMode) -> 'LLMConfig':
         """
-        Create a configuration for the given mode
+        Create a configuration for the given mode, reading from CONFIG
 
         Args:
             mode: LLM invocation mode
@@ -29,36 +29,46 @@ class LLMConfig:
         Returns:
             LLMConfig: the configuration object
         """
-        from src.utils.config import CONFIG
+        # Read the values from CONFIG
+        api_key = getattr(CONFIG.llm, "key", "")
+        base_url = getattr(CONFIG.llm, "base_url", "")
 
-        # Pick the model for the mode
-        model_name = (
-            CONFIG.llm.model_name if mode == LLMMode.NORMAL
-            else CONFIG.llm.fast_model_name
-        )
-
-        # The API key is read from the environment first
-        api_key = CONFIG.llm.key
+        # Pick the model for the mode
+        model_name = ""
+        if mode == LLMMode.FAST:
+            model_name = getattr(CONFIG.llm, "fast_model_name", "")
+        else:
+            # NORMAL, or the DEFAULT fallback
+            model_name = getattr(CONFIG.llm, "model_name", "")
 
         return cls(
             model_name=model_name,
             api_key=api_key,
-            base_url=CONFIG.llm.base_url
+            base_url=base_url
         )
 
 
 def get_task_mode(task_name: str) -> LLMMode:
     """
-    Get the LLM invocation mode for a given task
-
-    Args:
-        task_name: task name (a key under llm.default_modes)
-
-    Returns:
-        LLMMode: the matching mode; NORMAL if not configured
+    Get the LLM mode for a task name
     """
-    from src.utils.config import CONFIG
+    # Read the global mode; the "or" guards against OmegaConf returning None for a missing key
+    global_mode = (getattr(CONFIG.llm, "mode", None) or "default").lower()
 
-    # Read the configured mode string, defaulting to normal
-    mode_str = getattr(CONFIG.llm.default_modes, task_name, "normal")
-    return LLMMode(mode_str)
+    if global_mode == "normal":
+        return LLMMode.NORMAL
+    elif global_mode == "fast":
+        return LLMMode.FAST
+
+    # Default mode: consult the fine-grained per-task configuration.
+    # If default_modes is set, return the mode configured for this task name.
+    default_modes = getattr(CONFIG.llm, "default_modes", {})
+    if default_modes and task_name in default_modes:
+        task_mode = default_modes[task_name].lower()
+        if task_mode == "fast":
+            return LLMMode.FAST
+        else:
+            return LLMMode.NORMAL
+
+    # Fall back to NORMAL when nothing is configured
+    return LLMMode.NORMAL
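Note: the resolution order implemented above is: an llm.mode of "normal" or "fast" overrides everything; otherwise llm.default_modes[task_name] decides; otherwise NORMAL. A sketch of the expected results, assuming CONFIG.llm.mode is "default" and default_modes maps story_teller to "fast" and action_decision to "normal" (as in static/config.yml below):

    from src.utils.llm.config import LLMMode, get_task_mode

    assert get_task_mode("story_teller") == LLMMode.FAST       # per-task entry
    assert get_task_mode("action_decision") == LLMMode.NORMAL  # explicit normal
    assert get_task_mode("unknown_task") == LLMMode.NORMAL     # fallback
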
"") + base_url = getattr(CONFIG.llm, "base_url", "") # 根据模式选择模型 - model_name = ( - CONFIG.llm.model_name if mode == LLMMode.NORMAL - else CONFIG.llm.fast_model_name - ) - - # API Key 优先从环境变量读取 - api_key = CONFIG.llm.key + model_name = "" + if mode == LLMMode.FAST: + model_name = getattr(CONFIG.llm, "fast_model_name", "") + else: + # NORMAL or DEFAULT fallback + model_name = getattr(CONFIG.llm, "model_name", "") return cls( model_name=model_name, api_key=api_key, - base_url=CONFIG.llm.base_url + base_url=base_url ) def get_task_mode(task_name: str) -> LLMMode: """ - 获取指定任务的 LLM 调用模式 - - Args: - task_name: 任务名称 (配置在 llm.default_modes 下的 key) - - Returns: - LLMMode: 对应的模式,如果未配置则默认返回 NORMAL + 根据任务名称获取 LLM 模式 """ - from src.utils.config import CONFIG + # 从 CONFIG 读取全局模式 + global_mode = getattr(CONFIG.llm, "mode", "default").lower() - # 获取配置的模式字符串,默认 normal - mode_str = getattr(CONFIG.llm.default_modes, task_name, "normal") - return LLMMode(mode_str) + if global_mode == "normal": + return LLMMode.NORMAL + elif global_mode == "fast": + return LLMMode.FAST + + # Default 模式:根据 task_name 从细粒度配置中获取 + # 如果配置了 default_modes,则根据任务名称返回对应模式 + default_modes = getattr(CONFIG.llm, "default_modes", {}) + if default_modes and task_name in default_modes: + task_mode = default_modes[task_name].lower() + if task_mode == "fast": + return LLMMode.FAST + else: + return LLMMode.NORMAL + + # 如果没有配置,默认返回 NORMAL + return LLMMode.NORMAL diff --git a/static/config.yml b/static/config.yml index e5a32cd..9de6201 100644 --- a/static/config.yml +++ b/static/config.yml @@ -2,13 +2,6 @@ meta: version: "1.0.9" llm: - # 目前默认用的是阿里的千问大模型 api。 - # 如果你想用别家的,需要对应修改下面的base_url为对应的模型。 - # 填入对应的密钥,并且修改model_name和fast_model_name为对应的模型。 - key: "你的密钥" - base_url: "https://dashscope.aliyuncs.com/compatible-mode/v1" - model_name: "openai/qwen-plus" # 聪明的模型,负责难的任务 - fast_model_name: "openai/qwen-flash" # 快速的模型,负责简单的任务 default_modes: action_decision: "normal" long_term_objective: "normal" @@ -17,7 +10,6 @@ llm: relation_resolver: "fast" story_teller: "fast" interaction_feedback: "fast" - mode: "default" # default: 使用default_modes中的模式,normal: 均使用normal模式,fast: 均使用fast模式 paths: templates: static/templates/ diff --git a/web/src/App.vue b/web/src/App.vue index cb76b29..f3ecd8e 100644 --- a/web/src/App.vue +++ b/web/src/App.vue @@ -19,6 +19,7 @@ const socketStore = useSocketStore() const showMenu = ref(false) const isManualPaused = ref(false) +const menuDefaultTab = ref<'save' | 'load' | 'create' | 'delete' | 'llm'>('load') onMounted(async () => { // 初始化 Socket 连接 @@ -28,6 +29,15 @@ onMounted(async () => { window.addEventListener('keydown', handleKeydown) }) +// 导出方法供 socket store 调用 +function openLLMConfig() { + menuDefaultTab.value = 'llm' + showMenu.value = true +} + +// 暴露给全局以便 socket store 可以调用 +;(window as any).__openLLMConfig = openLLMConfig + onUnmounted(() => { socketStore.disconnect() window.removeEventListener('keydown', handleKeydown) @@ -115,7 +125,8 @@ watch([showMenu, isManualPaused], ([menuVisible, manualPaused]) => { diff --git a/web/src/api/http.ts b/web/src/api/http.ts index 530f2d1..e6e4742 100644 --- a/web/src/api/http.ts +++ b/web/src/api/http.ts @@ -8,11 +8,13 @@ const API_BASE = import.meta.env.VITE_API_TARGET || ''; export class ApiError extends Error { public status: number; + public response: { data: any }; - constructor(status: number, message: string) { + constructor(status: number, message: string, responseData?: any) { super(message); this.status = status; this.name = 'ApiError'; + this.response = { data: responseData 
diff --git a/web/src/App.vue b/web/src/App.vue
index cb76b29..f3ecd8e 100644
--- a/web/src/App.vue
+++ b/web/src/App.vue
@@ -19,6 +19,7 @@ const socketStore = useSocketStore()
 
 const showMenu = ref(false)
 const isManualPaused = ref(false)
+const menuDefaultTab = ref<'save' | 'load' | 'create' | 'delete' | 'llm'>('load')
 
 onMounted(async () => {
   // Initialize the socket connection
   await socketStore.connect()
   ...
   window.addEventListener('keydown', handleKeydown)
 })
 
+// Method exported for the socket store to call
+function openLLMConfig() {
+  menuDefaultTab.value = 'llm'
+  showMenu.value = true
+}
+
+// Expose globally so the socket store can call it
+;(window as any).__openLLMConfig = openLLMConfig
+
 onUnmounted(() => {
   socketStore.disconnect()
   window.removeEventListener('keydown', handleKeydown)
 })
@@ -115,7 +125,8 @@ watch([showMenu, isManualPaused], ([menuVisible, manualPaused]) => {
diff --git a/web/src/api/http.ts b/web/src/api/http.ts
index 530f2d1..e6e4742 100644
--- a/web/src/api/http.ts
+++ b/web/src/api/http.ts
@@ -8,11 +8,13 @@ const API_BASE = import.meta.env.VITE_API_TARGET || '';
 
 export class ApiError extends Error {
   public status: number;
+  public response: { data: any };
 
-  constructor(status: number, message: string) {
+  constructor(status: number, message: string, responseData?: any) {
     super(message);
     this.status = status;
     this.name = 'ApiError';
+    this.response = { data: responseData || {} };
   }
 }
 
@@ -21,7 +23,21 @@ async function request<T>(path: string, options: RequestInit = {}): Promise<T> {
   const response = await fetch(url, options);
 
   if (!response.ok) {
-    throw new ApiError(response.status, `Request failed: ${response.statusText}`);
+    // Try to parse the error response as JSON
+    let errorData = null;
+    let errorMessage = `Request failed: ${response.statusText}`;
+
+    try {
+      errorData = await response.json();
+      // If the backend returned a detail field, use it as the error message
+      if (errorData?.detail) {
+        errorMessage = errorData.detail;
+      }
+    } catch {
+      // Fall back to the default message if parsing fails
+    }
+
+    throw new ApiError(response.status, errorMessage, errorData);
   }
 
   // Assume the backend always returns JSON
diff --git a/web/src/components/SystemMenu.vue b/web/src/components/SystemMenu.vue
index 8bb04a4..fd4abd9 100644
--- a/web/src/components/SystemMenu.vue
+++ b/web/src/components/SystemMenu.vue
@@ -7,29 +7,36 @@ import LLMConfigPanel from './game/panels/system/LLMConfigPanel.vue'
 
 const props = defineProps<{
   visible: boolean
+  defaultTab?: 'save' | 'load' | 'create' | 'delete' | 'llm'
 }>()
 
 const emit = defineEmits<{
   (e: 'close'): void
 }>()
 
-const activeTab = ref<'save' | 'load' | 'create' | 'delete' | 'llm'>('load')
+const activeTab = ref<'save' | 'load' | 'create' | 'delete' | 'llm'>(props.defaultTab || 'load')
 
 function switchTab(tab: typeof activeTab.value) {
   activeTab.value = tab
 }
 
-// Reset tab when reopening
+// Watch for defaultTab changes
+watch(() => props.defaultTab, (newTab) => {
+  if (newTab) {
+    activeTab.value = newTab
+  }
+})
+
+// When the menu opens, use defaultTab if one is set
 watch(() => props.visible, (val) => {
-  if (val) {
-    // Do not reset activeTab to keep user context, or reset if preferred
-    // activeTab.value = 'load'
+  if (val && props.defaultTab) {
+    activeTab.value = props.defaultTab
   }
 })
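Note: the web/src/stores/socket.ts changes (31 lines in the diffstat) are missing from this patch text. For reference, these are the two control messages the server side above emits; the store presumably routes "llm_config_required" to the window.__openLLMConfig() hook exposed in App.vue. Shown as Python literals matching the send_json / broadcast calls:

    # Sent from websocket_endpoint when the startup connectivity check failed:
    LLM_CONFIG_REQUIRED = {
        "type": "llm_config_required",
        "error": "<detail string from check_llm_connectivity>",
    }

    # Broadcast from save_llm_config once the new configuration is saved:
    GAME_REINITIALIZED = {
        "type": "game_reinitialized",
        "message": "LLM configured successfully; the game has resumed",
    }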