diff --git a/electron/main/ai/conversations.ts b/electron/main/ai/conversations.ts new file mode 100644 index 0000000..a367615 --- /dev/null +++ b/electron/main/ai/conversations.ts @@ -0,0 +1,294 @@ +/** + * AI 对话历史管理模块 + * 在主进程中执行,管理 AI 对话的持久化存储 + */ + +import Database from 'better-sqlite3' +import * as fs from 'fs' +import * as path from 'path' +import { app } from 'electron' + +// AI 数据库存储目录 +let AI_DB_DIR: string | null = null +let AI_DB: Database.Database | null = null + +/** + * 获取 AI 数据库目录 + */ +function getAiDbDir(): string { + if (AI_DB_DIR) return AI_DB_DIR + + try { + const docPath = app.getPath('documents') + AI_DB_DIR = path.join(docPath, 'ChatLab', 'ai') + } catch { + AI_DB_DIR = path.join(process.cwd(), 'ai') + } + + return AI_DB_DIR +} + +/** + * 确保 AI 数据库目录存在 + */ +function ensureAiDbDir(): void { + const dir = getAiDbDir() + if (!fs.existsSync(dir)) { + fs.mkdirSync(dir, { recursive: true }) + } +} + +/** + * 获取 AI 数据库实例(单例) + */ +function getAiDb(): Database.Database { + if (AI_DB) return AI_DB + + ensureAiDbDir() + const dbPath = path.join(getAiDbDir(), 'conversations.db') + AI_DB = new Database(dbPath) + AI_DB.pragma('journal_mode = WAL') + + // 创建表结构 + AI_DB.exec(` + -- AI 对话表 + CREATE TABLE IF NOT EXISTS ai_conversation ( + id TEXT PRIMARY KEY, + session_id TEXT NOT NULL, + title TEXT, + created_at INTEGER NOT NULL, + updated_at INTEGER NOT NULL + ); + + -- AI 消息表 + CREATE TABLE IF NOT EXISTS ai_message ( + id TEXT PRIMARY KEY, + conversation_id TEXT NOT NULL, + role TEXT NOT NULL, + content TEXT NOT NULL, + timestamp INTEGER NOT NULL, + data_keywords TEXT, + data_message_count INTEGER, + FOREIGN KEY(conversation_id) REFERENCES ai_conversation(id) ON DELETE CASCADE + ); + + -- 索引 + CREATE INDEX IF NOT EXISTS idx_ai_conversation_session ON ai_conversation(session_id); + CREATE INDEX IF NOT EXISTS idx_ai_message_conversation ON ai_message(conversation_id); + `) + + return AI_DB +} + +/** + * 关闭 AI 数据库连接 + */ +export function closeAiDatabase(): void { + if (AI_DB) { + AI_DB.close() + AI_DB = null + } +} + +// ==================== 类型定义 ==================== + +/** + * AI 对话类型 + */ +export interface AIConversation { + id: string + sessionId: string + title: string | null + createdAt: number + updatedAt: number +} + +/** + * AI 消息类型 + */ +export interface AIMessage { + id: string + conversationId: string + role: 'user' | 'assistant' + content: string + timestamp: number + dataKeywords?: string[] + dataMessageCount?: number +} + +// ==================== 对话管理 ==================== + +/** + * 创建新对话 + */ +export function createConversation(sessionId: string, title?: string): AIConversation { + const db = getAiDb() + const now = Math.floor(Date.now() / 1000) + const id = `conv_${Date.now()}_${Math.random().toString(36).slice(2, 8)}` + + db.prepare(` + INSERT INTO ai_conversation (id, session_id, title, created_at, updated_at) + VALUES (?, ?, ?, ?, ?) + `).run(id, sessionId, title || null, now, now) + + return { + id, + sessionId, + title: title || null, + createdAt: now, + updatedAt: now, + } +} + +/** + * 获取会话的所有对话列表 + */ +export function getConversations(sessionId: string): AIConversation[] { + const db = getAiDb() + + const rows = db.prepare(` + SELECT id, session_id as sessionId, title, created_at as createdAt, updated_at as updatedAt + FROM ai_conversation + WHERE session_id = ? 
+ ORDER BY updated_at DESC + `).all(sessionId) as AIConversation[] + + return rows +} + +/** + * 获取单个对话 + */ +export function getConversation(conversationId: string): AIConversation | null { + const db = getAiDb() + + const row = db.prepare(` + SELECT id, session_id as sessionId, title, created_at as createdAt, updated_at as updatedAt + FROM ai_conversation + WHERE id = ? + `).get(conversationId) as AIConversation | undefined + + return row || null +} + +/** + * 更新对话标题 + */ +export function updateConversationTitle(conversationId: string, title: string): boolean { + const db = getAiDb() + const now = Math.floor(Date.now() / 1000) + + const result = db.prepare(` + UPDATE ai_conversation + SET title = ?, updated_at = ? + WHERE id = ? + `).run(title, now, conversationId) + + return result.changes > 0 +} + +/** + * 删除对话(级联删除消息) + */ +export function deleteConversation(conversationId: string): boolean { + const db = getAiDb() + + // 先删除消息 + db.prepare('DELETE FROM ai_message WHERE conversation_id = ?').run(conversationId) + // 再删除对话 + const result = db.prepare('DELETE FROM ai_conversation WHERE id = ?').run(conversationId) + + return result.changes > 0 +} + +// ==================== 消息管理 ==================== + +/** + * 添加消息到对话 + */ +export function addMessage( + conversationId: string, + role: 'user' | 'assistant', + content: string, + dataKeywords?: string[], + dataMessageCount?: number +): AIMessage { + const db = getAiDb() + const now = Math.floor(Date.now() / 1000) + const id = `msg_${Date.now()}_${Math.random().toString(36).slice(2, 8)}` + + db.prepare(` + INSERT INTO ai_message (id, conversation_id, role, content, timestamp, data_keywords, data_message_count) + VALUES (?, ?, ?, ?, ?, ?, ?) + `).run( + id, + conversationId, + role, + content, + now, + dataKeywords ? JSON.stringify(dataKeywords) : null, + dataMessageCount ?? null + ) + + // 更新对话的 updated_at + db.prepare('UPDATE ai_conversation SET updated_at = ? WHERE id = ?').run(now, conversationId) + + return { + id, + conversationId, + role, + content, + timestamp: now, + dataKeywords, + dataMessageCount, + } +} + +/** + * 获取对话的所有消息 + */ +export function getMessages(conversationId: string): AIMessage[] { + const db = getAiDb() + + const rows = db.prepare(` + SELECT + id, + conversation_id as conversationId, + role, + content, + timestamp, + data_keywords as dataKeywords, + data_message_count as dataMessageCount + FROM ai_message + WHERE conversation_id = ? + ORDER BY timestamp ASC + `).all(conversationId) as Array<{ + id: string + conversationId: string + role: string + content: string + timestamp: number + dataKeywords: string | null + dataMessageCount: number | null + }> + + return rows.map((row) => ({ + id: row.id, + conversationId: row.conversationId, + role: row.role as 'user' | 'assistant', + content: row.content, + timestamp: row.timestamp, + dataKeywords: row.dataKeywords ? JSON.parse(row.dataKeywords) : undefined, + dataMessageCount: row.dataMessageCount ?? 
undefined, + })) +} + +/** + * 删除单条消息 + */ +export function deleteMessage(messageId: string): boolean { + const db = getAiDb() + const result = db.prepare('DELETE FROM ai_message WHERE id = ?').run(messageId) + return result.changes > 0 +} + diff --git a/electron/main/ai/llm/deepseek.ts b/electron/main/ai/llm/deepseek.ts new file mode 100644 index 0000000..fd447e0 --- /dev/null +++ b/electron/main/ai/llm/deepseek.ts @@ -0,0 +1,184 @@ +/** + * DeepSeek LLM Provider + * 使用 OpenAI 兼容的 API 格式 + */ + +import type { + ILLMService, + LLMProvider, + ChatMessage, + ChatOptions, + ChatResponse, + ChatStreamChunk, + ProviderInfo, +} from './types' + +const DEFAULT_BASE_URL = 'https://api.deepseek.com' + +const MODELS = [ + { id: 'deepseek-chat', name: 'DeepSeek Chat', description: '通用对话模型' }, + { id: 'deepseek-coder', name: 'DeepSeek Coder', description: '代码生成模型' }, +] + +export const DEEPSEEK_INFO: ProviderInfo = { + id: 'deepseek', + name: 'DeepSeek', + description: 'DeepSeek AI 大语言模型', + defaultBaseUrl: DEFAULT_BASE_URL, + models: MODELS, +} + +export class DeepSeekService implements ILLMService { + private apiKey: string + private baseUrl: string + private model: string + + constructor(apiKey: string, model?: string, baseUrl?: string) { + this.apiKey = apiKey + this.baseUrl = baseUrl || DEFAULT_BASE_URL + this.model = model || 'deepseek-chat' + } + + getProvider(): LLMProvider { + return 'deepseek' + } + + getModels(): string[] { + return MODELS.map((m) => m.id) + } + + getDefaultModel(): string { + return 'deepseek-chat' + } + + async chat(messages: ChatMessage[], options?: ChatOptions): Promise { + const response = await fetch(`${this.baseUrl}/v1/chat/completions`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + Authorization: `Bearer ${this.apiKey}`, + }, + body: JSON.stringify({ + model: this.model, + messages: messages.map((m) => ({ role: m.role, content: m.content })), + temperature: options?.temperature ?? 0.7, + max_tokens: options?.maxTokens ?? 2048, + stream: false, + }), + }) + + if (!response.ok) { + const error = await response.text() + throw new Error(`DeepSeek API error: ${response.status} - ${error}`) + } + + const data = await response.json() + const choice = data.choices?.[0] + + return { + content: choice?.message?.content || '', + finishReason: choice?.finish_reason === 'stop' ? 'stop' : choice?.finish_reason === 'length' ? 'length' : 'error', + usage: data.usage + ? { + promptTokens: data.usage.prompt_tokens, + completionTokens: data.usage.completion_tokens, + totalTokens: data.usage.total_tokens, + } + : undefined, + } + } + + async *chatStream(messages: ChatMessage[], options?: ChatOptions): AsyncGenerator { + const response = await fetch(`${this.baseUrl}/v1/chat/completions`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + Authorization: `Bearer ${this.apiKey}`, + }, + body: JSON.stringify({ + model: this.model, + messages: messages.map((m) => ({ role: m.role, content: m.content })), + temperature: options?.temperature ?? 0.7, + max_tokens: options?.maxTokens ?? 
2048, + stream: true, + }), + }) + + if (!response.ok) { + const error = await response.text() + throw new Error(`DeepSeek API error: ${response.status} - ${error}`) + } + + const reader = response.body?.getReader() + if (!reader) { + throw new Error('Failed to get response reader') + } + + const decoder = new TextDecoder() + let buffer = '' + + try { + while (true) { + const { done, value } = await reader.read() + if (done) break + + buffer += decoder.decode(value, { stream: true }) + const lines = buffer.split('\n') + buffer = lines.pop() || '' + + for (const line of lines) { + const trimmed = line.trim() + if (!trimmed || !trimmed.startsWith('data: ')) continue + + const data = trimmed.slice(6) + if (data === '[DONE]') { + yield { content: '', isFinished: true, finishReason: 'stop' } + return + } + + try { + const parsed = JSON.parse(data) + const delta = parsed.choices?.[0]?.delta + const finishReason = parsed.choices?.[0]?.finish_reason + + if (delta?.content) { + yield { + content: delta.content, + isFinished: false, + } + } + + if (finishReason) { + yield { + content: '', + isFinished: true, + finishReason: finishReason === 'stop' ? 'stop' : finishReason === 'length' ? 'length' : 'error', + } + return + } + } catch { + // 忽略解析错误,继续处理下一行 + } + } + } + } finally { + reader.releaseLock() + } + } + + async validateApiKey(): Promise { + try { + // 发送一个简单请求验证 API Key + const response = await fetch(`${this.baseUrl}/v1/models`, { + method: 'GET', + headers: { + Authorization: `Bearer ${this.apiKey}`, + }, + }) + return response.ok + } catch { + return false + } + } +} + diff --git a/electron/main/ai/llm/index.ts b/electron/main/ai/llm/index.ts new file mode 100644 index 0000000..dbb85ac --- /dev/null +++ b/electron/main/ai/llm/index.ts @@ -0,0 +1,220 @@ +/** + * LLM 服务模块入口 + * 提供统一的 LLM 服务管理 + */ + +import * as fs from 'fs' +import * as path from 'path' +import { app } from 'electron' +import type { LLMConfig, LLMProvider, ILLMService, ProviderInfo, ChatMessage, ChatOptions, ChatStreamChunk } from './types' +import { DeepSeekService, DEEPSEEK_INFO } from './deepseek' +import { QwenService, QWEN_INFO } from './qwen' +import { aiLogger } from '../logger' + +// 导出类型 +export * from './types' + +// 所有支持的提供商信息 +export const PROVIDERS: ProviderInfo[] = [DEEPSEEK_INFO, QWEN_INFO] + +// 配置文件路径 +let CONFIG_PATH: string | null = null + +function getConfigPath(): string { + if (CONFIG_PATH) return CONFIG_PATH + + try { + const docPath = app.getPath('documents') + CONFIG_PATH = path.join(docPath, 'ChatLab', 'ai', 'llm-config.json') + } catch { + CONFIG_PATH = path.join(process.cwd(), 'ai', 'llm-config.json') + } + + return CONFIG_PATH +} + +/** + * LLM 配置管理 + */ +export interface StoredConfig { + provider: LLMProvider + apiKey: string + model?: string + maxTokens?: number +} + +/** + * 保存 LLM 配置 + */ +export function saveLLMConfig(config: StoredConfig): void { + const configPath = getConfigPath() + const dir = path.dirname(configPath) + + if (!fs.existsSync(dir)) { + fs.mkdirSync(dir, { recursive: true }) + } + + fs.writeFileSync(configPath, JSON.stringify(config, null, 2), 'utf-8') +} + +/** + * 加载 LLM 配置 + */ +export function loadLLMConfig(): StoredConfig | null { + const configPath = getConfigPath() + + if (!fs.existsSync(configPath)) { + return null + } + + try { + const content = fs.readFileSync(configPath, 'utf-8') + return JSON.parse(content) as StoredConfig + } catch { + return null + } +} + +/** + * 删除 LLM 配置 + */ +export function deleteLLMConfig(): void { + const configPath = getConfigPath() + 
+ if (fs.existsSync(configPath)) { + fs.unlinkSync(configPath) + } +} + +/** + * 检查是否已配置 LLM + */ +export function hasLLMConfig(): boolean { + const config = loadLLMConfig() + return config !== null && !!config.apiKey +} + +/** + * 创建 LLM 服务实例 + */ +export function createLLMService(config: LLMConfig): ILLMService { + switch (config.provider) { + case 'deepseek': + return new DeepSeekService(config.apiKey, config.model, config.baseUrl) + case 'qwen': + return new QwenService(config.apiKey, config.model, config.baseUrl) + default: + throw new Error(`Unknown LLM provider: ${config.provider}`) + } +} + +/** + * 获取当前配置的 LLM 服务实例 + */ +export function getCurrentLLMService(): ILLMService | null { + const config = loadLLMConfig() + if (!config || !config.apiKey) { + return null + } + + return createLLMService({ + provider: config.provider, + apiKey: config.apiKey, + model: config.model, + maxTokens: config.maxTokens, + }) +} + +/** + * 获取提供商信息 + */ +export function getProviderInfo(provider: LLMProvider): ProviderInfo | null { + return PROVIDERS.find((p) => p.id === provider) || null +} + +/** + * 验证 API Key + */ +export async function validateApiKey(provider: LLMProvider, apiKey: string): Promise { + const service = createLLMService({ provider, apiKey }) + return service.validateApiKey() +} + +/** + * 发送聊天请求(使用当前配置) + */ +export async function chat(messages: ChatMessage[], options?: ChatOptions): Promise { + aiLogger.info('LLM', '开始非流式聊天请求', { + messagesCount: messages.length, + firstMessageRole: messages[0]?.role, + firstMessageLength: messages[0]?.content?.length, + options, + }) + + const service = getCurrentLLMService() + if (!service) { + aiLogger.error('LLM', '服务未配置') + throw new Error('LLM 服务未配置,请先在设置中配置 API Key') + } + + aiLogger.info('LLM', `使用提供商: ${service.getProvider()}`) + + try { + const response = await service.chat(messages, options) + aiLogger.info('LLM', '非流式请求成功', { + contentLength: response.content?.length, + finishReason: response.finishReason, + usage: response.usage, + }) + return response.content + } catch (error) { + aiLogger.error('LLM', '非流式请求失败', { error: String(error) }) + throw error + } +} + +/** + * 发送聊天请求(流式,使用当前配置) + */ +export async function* chatStream(messages: ChatMessage[], options?: ChatOptions): AsyncGenerator { + aiLogger.info('LLM', '开始流式聊天请求', { + messagesCount: messages.length, + firstMessageRole: messages[0]?.role, + firstMessageLength: messages[0]?.content?.length, + options, + }) + + const service = getCurrentLLMService() + if (!service) { + aiLogger.error('LLM', '服务未配置(流式)') + throw new Error('LLM 服务未配置,请先在设置中配置 API Key') + } + + aiLogger.info('LLM', `使用提供商(流式): ${service.getProvider()}`) + + let chunkCount = 0 + let totalContent = '' + + try { + for await (const chunk of service.chatStream(messages, options)) { + chunkCount++ + totalContent += chunk.content + yield chunk + + if (chunk.isFinished) { + aiLogger.info('LLM', '流式请求完成', { + chunkCount, + totalContentLength: totalContent.length, + finishReason: chunk.finishReason, + }) + } + } + } catch (error) { + aiLogger.error('LLM', '流式请求失败', { + error: String(error), + chunkCountBeforeError: chunkCount, + }) + throw error + } +} + diff --git a/electron/main/ai/llm/qwen.ts b/electron/main/ai/llm/qwen.ts new file mode 100644 index 0000000..52a74ef --- /dev/null +++ b/electron/main/ai/llm/qwen.ts @@ -0,0 +1,185 @@ +/** + * 通义千问 (Qwen) LLM Provider + * 使用阿里云 DashScope 兼容 OpenAI 格式的 API + */ + +import type { + ILLMService, + LLMProvider, + ChatMessage, + ChatOptions, + ChatResponse, + ChatStreamChunk, + 
ProviderInfo, +} from './types' + +const DEFAULT_BASE_URL = 'https://dashscope.aliyuncs.com/compatible-mode/v1' + +const MODELS = [ + { id: 'qwen-turbo', name: 'Qwen Turbo', description: '通义千问超大规模语言模型,速度快' }, + { id: 'qwen-plus', name: 'Qwen Plus', description: '通义千问超大规模语言模型,效果好' }, + { id: 'qwen-max', name: 'Qwen Max', description: '通义千问千亿级别超大规模语言模型' }, +] + +export const QWEN_INFO: ProviderInfo = { + id: 'qwen', + name: '通义千问', + description: '阿里云通义千问大语言模型', + defaultBaseUrl: DEFAULT_BASE_URL, + models: MODELS, +} + +export class QwenService implements ILLMService { + private apiKey: string + private baseUrl: string + private model: string + + constructor(apiKey: string, model?: string, baseUrl?: string) { + this.apiKey = apiKey + this.baseUrl = baseUrl || DEFAULT_BASE_URL + this.model = model || 'qwen-turbo' + } + + getProvider(): LLMProvider { + return 'qwen' + } + + getModels(): string[] { + return MODELS.map((m) => m.id) + } + + getDefaultModel(): string { + return 'qwen-turbo' + } + + async chat(messages: ChatMessage[], options?: ChatOptions): Promise { + const response = await fetch(`${this.baseUrl}/chat/completions`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + Authorization: `Bearer ${this.apiKey}`, + }, + body: JSON.stringify({ + model: this.model, + messages: messages.map((m) => ({ role: m.role, content: m.content })), + temperature: options?.temperature ?? 0.7, + max_tokens: options?.maxTokens ?? 2048, + stream: false, + }), + }) + + if (!response.ok) { + const error = await response.text() + throw new Error(`Qwen API error: ${response.status} - ${error}`) + } + + const data = await response.json() + const choice = data.choices?.[0] + + return { + content: choice?.message?.content || '', + finishReason: choice?.finish_reason === 'stop' ? 'stop' : choice?.finish_reason === 'length' ? 'length' : 'error', + usage: data.usage + ? { + promptTokens: data.usage.prompt_tokens, + completionTokens: data.usage.completion_tokens, + totalTokens: data.usage.total_tokens, + } + : undefined, + } + } + + async *chatStream(messages: ChatMessage[], options?: ChatOptions): AsyncGenerator { + const response = await fetch(`${this.baseUrl}/chat/completions`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + Authorization: `Bearer ${this.apiKey}`, + }, + body: JSON.stringify({ + model: this.model, + messages: messages.map((m) => ({ role: m.role, content: m.content })), + temperature: options?.temperature ?? 0.7, + max_tokens: options?.maxTokens ?? 
2048, + stream: true, + }), + }) + + if (!response.ok) { + const error = await response.text() + throw new Error(`Qwen API error: ${response.status} - ${error}`) + } + + const reader = response.body?.getReader() + if (!reader) { + throw new Error('Failed to get response reader') + } + + const decoder = new TextDecoder() + let buffer = '' + + try { + while (true) { + const { done, value } = await reader.read() + if (done) break + + buffer += decoder.decode(value, { stream: true }) + const lines = buffer.split('\n') + buffer = lines.pop() || '' + + for (const line of lines) { + const trimmed = line.trim() + if (!trimmed || !trimmed.startsWith('data: ')) continue + + const data = trimmed.slice(6) + if (data === '[DONE]') { + yield { content: '', isFinished: true, finishReason: 'stop' } + return + } + + try { + const parsed = JSON.parse(data) + const delta = parsed.choices?.[0]?.delta + const finishReason = parsed.choices?.[0]?.finish_reason + + if (delta?.content) { + yield { + content: delta.content, + isFinished: false, + } + } + + if (finishReason) { + yield { + content: '', + isFinished: true, + finishReason: finishReason === 'stop' ? 'stop' : finishReason === 'length' ? 'length' : 'error', + } + return + } + } catch { + // 忽略解析错误,继续处理下一行 + } + } + } + } finally { + reader.releaseLock() + } + } + + async validateApiKey(): Promise { + try { + // 发送一个简单请求验证 API Key + const response = await fetch(`${this.baseUrl}/models`, { + method: 'GET', + headers: { + Authorization: `Bearer ${this.apiKey}`, + }, + }) + return response.ok + } catch { + return false + } + } +} + diff --git a/electron/main/ai/llm/types.ts b/electron/main/ai/llm/types.ts new file mode 100644 index 0000000..7ab3c4a --- /dev/null +++ b/electron/main/ai/llm/types.ts @@ -0,0 +1,109 @@ +/** + * LLM 服务类型定义 + */ + +/** + * 支持的 LLM 提供商 + */ +export type LLMProvider = 'deepseek' | 'qwen' + +/** + * LLM 配置 + */ +export interface LLMConfig { + provider: LLMProvider + apiKey: string + model?: string + baseUrl?: string + maxTokens?: number +} + +/** + * 聊天消息 + */ +export interface ChatMessage { + role: 'system' | 'user' | 'assistant' + content: string +} + +/** + * 聊天请求选项 + */ +export interface ChatOptions { + temperature?: number + maxTokens?: number + stream?: boolean +} + +/** + * 非流式响应 + */ +export interface ChatResponse { + content: string + finishReason: 'stop' | 'length' | 'error' + usage?: { + promptTokens: number + completionTokens: number + totalTokens: number + } +} + +/** + * 流式响应 chunk + */ +export interface ChatStreamChunk { + content: string + isFinished: boolean + finishReason?: 'stop' | 'length' | 'error' +} + +/** + * LLM 服务接口 + */ +export interface ILLMService { + /** + * 获取提供商名称 + */ + getProvider(): LLMProvider + + /** + * 获取可用模型列表 + */ + getModels(): string[] + + /** + * 获取默认模型 + */ + getDefaultModel(): string + + /** + * 发送聊天请求(非流式) + */ + chat(messages: ChatMessage[], options?: ChatOptions): Promise + + /** + * 发送聊天请求(流式) + */ + chatStream(messages: ChatMessage[], options?: ChatOptions): AsyncGenerator + + /** + * 验证 API Key 是否有效 + */ + validateApiKey(): Promise +} + +/** + * 提供商信息 + */ +export interface ProviderInfo { + id: LLMProvider + name: string + description: string + defaultBaseUrl: string + models: Array<{ + id: string + name: string + description?: string + }> +} + diff --git a/electron/main/ai/logger.ts b/electron/main/ai/logger.ts new file mode 100644 index 0000000..c1c5b62 --- /dev/null +++ b/electron/main/ai/logger.ts @@ -0,0 +1,162 @@ +/** + * AI 日志模块 + * 将 AI 相关操作日志写入本地文件 + */ + +import * as fs 
from 'fs' +import * as path from 'path' +import { app } from 'electron' + +// 日志目录 +let LOG_DIR: string | null = null +let LOG_FILE: string | null = null +let logStream: fs.WriteStream | null = null + +/** + * 获取日志目录 + */ +function getLogDir(): string { + if (LOG_DIR) return LOG_DIR + + try { + const docPath = app.getPath('documents') + LOG_DIR = path.join(docPath, 'ChatLab', 'logs') + } catch { + LOG_DIR = path.join(process.cwd(), 'logs') + } + + return LOG_DIR +} + +/** + * 确保日志目录存在 + */ +function ensureLogDir(): void { + const dir = getLogDir() + if (!fs.existsSync(dir)) { + fs.mkdirSync(dir, { recursive: true }) + } +} + +/** + * 获取当前日志文件路径 + */ +function getLogFilePath(): string { + if (LOG_FILE) return LOG_FILE + + ensureLogDir() + const date = new Date().toISOString().split('T')[0] + LOG_FILE = path.join(getLogDir(), `ai_${date}.log`) + + return LOG_FILE +} + +/** + * 获取日志写入流 + */ +function getLogStream(): fs.WriteStream { + if (logStream) return logStream + + const filePath = getLogFilePath() + logStream = fs.createWriteStream(filePath, { flags: 'a', encoding: 'utf-8' }) + + return logStream +} + +/** + * 格式化时间戳 + */ +function formatTimestamp(): string { + return new Date().toISOString() +} + +/** + * 日志级别 + */ +type LogLevel = 'DEBUG' | 'INFO' | 'WARN' | 'ERROR' + +/** + * 写入日志 + */ +function writeLog(level: LogLevel, category: string, message: string, data?: any): void { + const timestamp = formatTimestamp() + let logLine = `[${timestamp}] [${level}] [${category}] ${message}` + + if (data !== undefined) { + try { + const dataStr = typeof data === 'string' ? data : JSON.stringify(data, null, 2) + logLine += `\n${dataStr}` + } catch { + logLine += `\n[无法序列化的数据]` + } + } + + logLine += '\n' + + // 写入文件 + try { + const stream = getLogStream() + stream.write(logLine) + } catch (error) { + console.error('[AILogger] 写入日志失败:', error) + } + + // 同时输出到控制台 + console.log(`[AI] ${logLine.trim()}`) +} + +/** + * AI 日志对象 + */ +export const aiLogger = { + debug(category: string, message: string, data?: any) { + writeLog('DEBUG', category, message, data) + }, + + info(category: string, message: string, data?: any) { + writeLog('INFO', category, message, data) + }, + + warn(category: string, message: string, data?: any) { + writeLog('WARN', category, message, data) + }, + + error(category: string, message: string, data?: any) { + writeLog('ERROR', category, message, data) + }, + + /** + * 关闭日志流 + */ + close() { + if (logStream) { + logStream.end() + logStream = null + } + }, + + /** + * 获取日志文件路径 + */ + getLogPath(): string { + return getLogFilePath() + }, +} + +// 导出便捷函数 +export function logAI(message: string, data?: any) { + aiLogger.info('AI', message, data) +} + +export function logLLM(message: string, data?: any) { + aiLogger.info('LLM', message, data) +} + +export function logSearch(message: string, data?: any) { + aiLogger.info('Search', message, data) +} + +export function logRAG(message: string, data?: any) { + aiLogger.info('RAG', message, data) +} + diff --git a/electron/main/ipcMain.ts b/electron/main/ipcMain.ts index 9b4bf73..e029fb0 100644 --- a/electron/main/ipcMain.ts +++ b/electron/main/ipcMain.ts @@ -13,6 +13,12 @@ import { detectFormat, type ParseProgress } from './parser' // 导入合并模块 import * as merger from './merger' import { deleteTempDatabase, cleanupAllTempDatabases } from './merger/tempCache' +// 导入 AI 对话管理模块 +import * as aiConversations from './ai/conversations' +// 导入 LLM 服务模块 +import * as llm from './ai/llm' +// 导入 AI 日志模块 +import { aiLogger } from './ai/logger' import 
type { MergeParams } from '../../src/types/chat' console.log('[IpcMain] Database, Worker and Parser modules imported') @@ -718,6 +724,288 @@ const mainIpcMain = (win: BrowserWindow) => { throw error } }) + + // ==================== AI 功能 ==================== + + /** + * 搜索消息(关键词搜索) + */ + ipcMain.handle( + 'ai:searchMessages', + async (_, sessionId: string, keywords: string[], filter?: { startTs?: number; endTs?: number }, limit?: number, offset?: number) => { + aiLogger.info('IPC', '收到搜索消息请求', { + sessionId, + keywords, + filter, + limit, + offset, + }) + try { + const result = await worker.searchMessages(sessionId, keywords, filter, limit, offset) + aiLogger.info('IPC', '搜索消息完成', { + total: result.total, + returned: result.messages.length, + }) + return result + } catch (error) { + aiLogger.error('IPC', '搜索消息失败', { error: String(error) }) + console.error('搜索消息失败:', error) + return { messages: [], total: 0 } + } + } + ) + + /** + * 获取消息上下文 + */ + ipcMain.handle('ai:getMessageContext', async (_, sessionId: string, messageId: number, contextSize?: number) => { + try { + return await worker.getMessageContext(sessionId, messageId, contextSize) + } catch (error) { + console.error('获取消息上下文失败:', error) + return [] + } + }) + + /** + * 创建 AI 对话 + */ + ipcMain.handle('ai:createConversation', async (_, sessionId: string, title?: string) => { + try { + return aiConversations.createConversation(sessionId, title) + } catch (error) { + console.error('创建 AI 对话失败:', error) + throw error + } + }) + + /** + * 获取会话的所有 AI 对话列表 + */ + ipcMain.handle('ai:getConversations', async (_, sessionId: string) => { + try { + return aiConversations.getConversations(sessionId) + } catch (error) { + console.error('获取 AI 对话列表失败:', error) + return [] + } + }) + + /** + * 获取单个 AI 对话 + */ + ipcMain.handle('ai:getConversation', async (_, conversationId: string) => { + try { + return aiConversations.getConversation(conversationId) + } catch (error) { + console.error('获取 AI 对话失败:', error) + return null + } + }) + + /** + * 更新 AI 对话标题 + */ + ipcMain.handle('ai:updateConversationTitle', async (_, conversationId: string, title: string) => { + try { + return aiConversations.updateConversationTitle(conversationId, title) + } catch (error) { + console.error('更新 AI 对话标题失败:', error) + return false + } + }) + + /** + * 删除 AI 对话 + */ + ipcMain.handle('ai:deleteConversation', async (_, conversationId: string) => { + try { + return aiConversations.deleteConversation(conversationId) + } catch (error) { + console.error('删除 AI 对话失败:', error) + return false + } + }) + + /** + * 添加 AI 消息 + */ + ipcMain.handle( + 'ai:addMessage', + async (_, conversationId: string, role: 'user' | 'assistant', content: string, dataKeywords?: string[], dataMessageCount?: number) => { + try { + return aiConversations.addMessage(conversationId, role, content, dataKeywords, dataMessageCount) + } catch (error) { + console.error('添加 AI 消息失败:', error) + throw error + } + } + ) + + /** + * 获取 AI 对话的所有消息 + */ + ipcMain.handle('ai:getMessages', async (_, conversationId: string) => { + try { + return aiConversations.getMessages(conversationId) + } catch (error) { + console.error('获取 AI 消息失败:', error) + return [] + } + }) + + /** + * 删除 AI 消息 + */ + ipcMain.handle('ai:deleteMessage', async (_, messageId: string) => { + try { + return aiConversations.deleteMessage(messageId) + } catch (error) { + console.error('删除 AI 消息失败:', error) + return false + } + }) + + // ==================== LLM 服务 ==================== + + /** + * 获取所有支持的 LLM 提供商 + */ + 
ipcMain.handle('llm:getProviders', async () => { + return llm.PROVIDERS + }) + + /** + * 获取当前 LLM 配置 + */ + ipcMain.handle('llm:getConfig', async () => { + const config = llm.loadLLMConfig() + if (!config) return null + // 不返回完整的 API Key,只返回脱敏版本 + return { + provider: config.provider, + apiKey: config.apiKey ? `${config.apiKey.slice(0, 8)}...${config.apiKey.slice(-4)}` : '', + apiKeySet: !!config.apiKey, + model: config.model, + maxTokens: config.maxTokens, + } + }) + + /** + * 保存 LLM 配置 + */ + ipcMain.handle('llm:saveConfig', async (_, config: { provider: llm.LLMProvider; apiKey: string; model?: string; maxTokens?: number }) => { + try { + llm.saveLLMConfig(config) + return { success: true } + } catch (error) { + console.error('保存 LLM 配置失败:', error) + return { success: false, error: String(error) } + } + }) + + /** + * 删除 LLM 配置 + */ + ipcMain.handle('llm:deleteConfig', async () => { + try { + llm.deleteLLMConfig() + return true + } catch (error) { + console.error('删除 LLM 配置失败:', error) + return false + } + }) + + /** + * 验证 API Key + */ + ipcMain.handle('llm:validateApiKey', async (_, provider: llm.LLMProvider, apiKey: string) => { + try { + return await llm.validateApiKey(provider, apiKey) + } catch (error) { + console.error('验证 API Key 失败:', error) + return false + } + }) + + /** + * 检查是否已配置 LLM + */ + ipcMain.handle('llm:hasConfig', async () => { + return llm.hasLLMConfig() + }) + + /** + * 发送 LLM 聊天请求(非流式) + */ + ipcMain.handle('llm:chat', async (_, messages: llm.ChatMessage[], options?: llm.ChatOptions) => { + aiLogger.info('IPC', '收到非流式 LLM 请求', { + messagesCount: messages.length, + firstMsgRole: messages[0]?.role, + firstMsgContentLen: messages[0]?.content?.length, + options, + }) + try { + const response = await llm.chat(messages, options) + aiLogger.info('IPC', '非流式 LLM 请求成功', { responseLength: response.length }) + return { success: true, content: response } + } catch (error) { + aiLogger.error('IPC', '非流式 LLM 请求失败', { error: String(error) }) + console.error('LLM 聊天失败:', error) + return { success: false, error: String(error) } + } + }) + + /** + * 发送 LLM 聊天请求(流式) + * 使用 IPC 事件发送流式数据 + */ + ipcMain.handle('llm:chatStream', async (_, requestId: string, messages: llm.ChatMessage[], options?: llm.ChatOptions) => { + aiLogger.info('IPC', `收到流式聊天请求: ${requestId}`, { + messagesCount: messages.length, + options, + }) + + try { + const generator = llm.chatStream(messages, options) + aiLogger.info('IPC', `创建流式生成器: ${requestId}`) + + // 异步处理流式响应 + ;(async () => { + let chunkIndex = 0 + try { + aiLogger.info('IPC', `开始迭代流式响应: ${requestId}`) + for await (const chunk of generator) { + chunkIndex++ + aiLogger.debug('IPC', `发送 chunk #${chunkIndex}: ${requestId}`, { + contentLength: chunk.content?.length, + isFinished: chunk.isFinished, + finishReason: chunk.finishReason, + }) + win.webContents.send('llm:streamChunk', { requestId, chunk }) + } + aiLogger.info('IPC', `流式响应完成: ${requestId}`, { totalChunks: chunkIndex }) + } catch (error) { + aiLogger.error('IPC', `流式响应出错: ${requestId}`, { + error: String(error), + chunkIndex, + }) + win.webContents.send('llm:streamChunk', { + requestId, + chunk: { content: '', isFinished: true, finishReason: 'error' }, + error: String(error), + }) + } + })() + + return { success: true } + } catch (error) { + aiLogger.error('IPC', `创建流式请求失败: ${requestId}`, { error: String(error) }) + console.error('LLM 流式聊天失败:', error) + return { success: false, error: String(error) } + } + }) } export default mainIpcMain diff --git a/electron/main/worker/dbWorker.ts 
b/electron/main/worker/dbWorker.ts index fd9cc19..039b878 100644 --- a/electron/main/worker/dbWorker.ts +++ b/electron/main/worker/dbWorker.ts @@ -33,6 +33,8 @@ import { getLaughAnalysis, getMemeBattleAnalysis, getCheckInAnalysis, + searchMessages, + getMessageContext, } from './query' import { parseFile, detectFormat } from '../parser' import { streamImport, streamParseFileInfo } from './import' @@ -109,6 +111,10 @@ const syncHandlers: Record any> = { getLaughAnalysis: (p) => getLaughAnalysis(p.sessionId, p.filter, p.keywords), getMemeBattleAnalysis: (p) => getMemeBattleAnalysis(p.sessionId, p.filter), getCheckInAnalysis: (p) => getCheckInAnalysis(p.sessionId, p.filter), + + // AI 查询 + searchMessages: (p) => searchMessages(p.sessionId, p.keywords, p.filter, p.limit, p.offset), + getMessageContext: (p) => getMessageContext(p.sessionId, p.messageId, p.contextSize), } // 异步消息处理器(流式操作) diff --git a/electron/main/worker/index.ts b/electron/main/worker/index.ts index 86e0cba..2639809 100644 --- a/electron/main/worker/index.ts +++ b/electron/main/worker/index.ts @@ -36,4 +36,9 @@ export { // 流式导入 API streamImport, streamParseFileInfo, + // AI 查询 API + searchMessages, + getMessageContext, } from './workerManager' + +export type { SearchMessageResult } from './workerManager' diff --git a/electron/main/worker/query/ai.ts b/electron/main/worker/query/ai.ts new file mode 100644 index 0000000..b4c37d3 --- /dev/null +++ b/electron/main/worker/query/ai.ts @@ -0,0 +1,128 @@ +/** + * AI 查询模块 + * 提供关键词搜索功能(在 Worker 线程中执行) + */ + +import { openDatabase, buildTimeFilter, type TimeFilter } from '../core' + +// ==================== 消息搜索 ==================== + +/** + * 搜索消息结果类型 + */ +export interface SearchMessageResult { + id: number + senderName: string + senderPlatformId: string + content: string + timestamp: number + type: number +} + +/** + * 关键词搜索消息 + * @param sessionId 会话 ID + * @param keywords 关键词数组(OR 逻辑) + * @param filter 时间过滤器 + * @param limit 返回数量限制 + * @param offset 偏移量(分页) + */ +export function searchMessages( + sessionId: string, + keywords: string[], + filter?: TimeFilter, + limit: number = 20, + offset: number = 0 +): { messages: SearchMessageResult[]; total: number } { + const db = openDatabase(sessionId) + if (!db) return { messages: [], total: 0 } + + // 构建关键词条件(OR 逻辑) + const keywordConditions = keywords.map(() => `msg.content LIKE ?`).join(' OR ') + const keywordParams = keywords.map((k) => `%${k}%`) + + // 构建时间过滤条件 + const { clause: timeClause, params: timeParams } = buildTimeFilter(filter) + const timeCondition = timeClause ? timeClause.replace('WHERE', 'AND') : '' + + // 排除系统消息 + const systemFilter = "AND m.name != '系统消息'" + + // 查询总数 + const countSql = ` + SELECT COUNT(*) as total + FROM message msg + JOIN member m ON msg.sender_id = m.id + WHERE (${keywordConditions}) + ${timeCondition} + ${systemFilter} + ` + const totalRow = db.prepare(countSql).get(...keywordParams, ...timeParams) as { total: number } + const total = totalRow?.total || 0 + + // 查询消息 + const sql = ` + SELECT + msg.id, + m.name as senderName, + m.platform_id as senderPlatformId, + msg.content, + msg.ts as timestamp, + msg.type + FROM message msg + JOIN member m ON msg.sender_id = m.id + WHERE (${keywordConditions}) + ${timeCondition} + ${systemFilter} + ORDER BY msg.ts DESC + LIMIT ? OFFSET ? 
+ ` + + const rows = db.prepare(sql).all(...keywordParams, ...timeParams, limit, offset) as SearchMessageResult[] + + return { messages: rows, total } +} + +/** + * 获取消息上下文(指定消息前后的消息) + */ +export function getMessageContext( + sessionId: string, + messageId: number, + contextSize: number = 5 +): SearchMessageResult[] { + const db = openDatabase(sessionId) + if (!db) return [] + + // 获取目标消息的时间戳 + const targetMsg = db.prepare('SELECT ts FROM message WHERE id = ?').get(messageId) as { ts: number } | undefined + if (!targetMsg) return [] + + // 获取前后消息 + const sql = ` + SELECT + msg.id, + m.name as senderName, + m.platform_id as senderPlatformId, + msg.content, + msg.ts as timestamp, + msg.type + FROM message msg + JOIN member m ON msg.sender_id = m.id + WHERE m.name != '系统消息' + AND msg.ts BETWEEN ? AND ? + ORDER BY msg.ts ASC + LIMIT ? + ` + + // 获取前后 contextSize 秒的消息(假设平均每秒 1 条消息) + const timeWindow = contextSize * 60 // 前后各 contextSize 分钟 + const rows = db.prepare(sql).all( + targetMsg.ts - timeWindow, + targetMsg.ts + timeWindow, + contextSize * 2 + 1 + ) as SearchMessageResult[] + + return rows +} + diff --git a/electron/main/worker/query/index.ts b/electron/main/worker/query/index.ts index 597b8b7..5706281 100644 --- a/electron/main/worker/query/index.ts +++ b/electron/main/worker/query/index.ts @@ -32,3 +32,6 @@ export { getLaughAnalysis, } from './advanced' +// AI 查询 +export { searchMessages, getMessageContext } from './ai' + diff --git a/electron/main/worker/workerManager.ts b/electron/main/worker/workerManager.ts index 6eb8cf0..e4282f5 100644 --- a/electron/main/worker/workerManager.ts +++ b/electron/main/worker/workerManager.ts @@ -350,3 +350,38 @@ export async function streamImport( export function getDbDirectory(): string { return getDbDir() } + +// ==================== AI 查询 API ==================== + +export interface SearchMessageResult { + id: number + senderName: string + senderPlatformId: string + content: string + timestamp: number + type: number +} + +/** + * 关键词搜索消息 + */ +export async function searchMessages( + sessionId: string, + keywords: string[], + filter?: any, + limit?: number, + offset?: number +): Promise<{ messages: SearchMessageResult[]; total: number }> { + return sendToWorker('searchMessages', { sessionId, keywords, filter, limit, offset }) +} + +/** + * 获取消息上下文 + */ +export async function getMessageContext( + sessionId: string, + messageId: number, + contextSize?: number +): Promise { + return sendToWorker('getMessageContext', { sessionId, messageId, contextSize }) +} diff --git a/electron/preload/index.d.ts b/electron/preload/index.d.ts index 9d298da..b33d2a9 100644 --- a/electron/preload/index.d.ts +++ b/electron/preload/index.d.ts @@ -81,13 +81,121 @@ interface MergeApi { onParseProgress: (callback: (data: { filePath: string; progress: ImportProgress }) => void) => () => void } +// AI 相关类型 +interface SearchMessageResult { + id: number + senderName: string + senderPlatformId: string + content: string + timestamp: number + type: number +} + +interface AIConversation { + id: string + sessionId: string + title: string | null + createdAt: number + updatedAt: number +} + +interface AIMessage { + id: string + conversationId: string + role: 'user' | 'assistant' + content: string + timestamp: number + dataKeywords?: string[] + dataMessageCount?: number +} + +interface AiApi { + searchMessages: ( + sessionId: string, + keywords: string[], + filter?: TimeFilter, + limit?: number, + offset?: number + ) => Promise<{ messages: SearchMessageResult[]; total: number }> + 
getMessageContext: (sessionId: string, messageId: number, contextSize?: number) => Promise + createConversation: (sessionId: string, title?: string) => Promise + getConversations: (sessionId: string) => Promise + getConversation: (conversationId: string) => Promise + updateConversationTitle: (conversationId: string, title: string) => Promise + deleteConversation: (conversationId: string) => Promise + addMessage: ( + conversationId: string, + role: 'user' | 'assistant', + content: string, + dataKeywords?: string[], + dataMessageCount?: number + ) => Promise + getMessages: (conversationId: string) => Promise + deleteMessage: (messageId: string) => Promise +} + +// LLM 相关类型 +interface LLMProviderInfo { + id: string + name: string + description: string + defaultBaseUrl: string + models: Array<{ id: string; name: string; description?: string }> +} + +interface LLMConfig { + provider: string + apiKey: string + apiKeySet: boolean + model?: string + maxTokens?: number +} + +interface LLMChatMessage { + role: 'system' | 'user' | 'assistant' + content: string +} + +interface LLMChatOptions { + temperature?: number + maxTokens?: number +} + +interface LLMChatStreamChunk { + content: string + isFinished: boolean + finishReason?: 'stop' | 'length' | 'error' +} + +interface LlmApi { + getProviders: () => Promise + getConfig: () => Promise + saveConfig: (config: { + provider: string + apiKey: string + model?: string + maxTokens?: number + }) => Promise<{ success: boolean; error?: string }> + deleteConfig: () => Promise + validateApiKey: (provider: string, apiKey: string) => Promise + hasConfig: () => Promise + chat: (messages: LLMChatMessage[], options?: LLMChatOptions) => Promise<{ success: boolean; content?: string; error?: string }> + chatStream: ( + messages: LLMChatMessage[], + options?: LLMChatOptions, + onChunk?: (chunk: LLMChatStreamChunk) => void + ) => Promise<{ success: boolean; error?: string }> +} + declare global { interface Window { electron: ElectronAPI api: Api chatApi: ChatApi mergeApi: MergeApi + aiApi: AiApi + llmApi: LlmApi } } -export { ChatApi, Api, MergeApi } +export { ChatApi, Api, MergeApi, AiApi, LlmApi, SearchMessageResult, AIConversation, AIMessage, LLMProviderInfo, LLMConfig, LLMChatMessage, LLMChatOptions, LLMChatStreamChunk } diff --git a/electron/preload/index.ts b/electron/preload/index.ts index 0bca0d0..cb6a0f2 100644 --- a/electron/preload/index.ts +++ b/electron/preload/index.ts @@ -42,7 +42,7 @@ const api = { } }, receive: (channel: string, func: (...args: unknown[]) => void) => { - const validChannels = ['show-message', 'chat:importProgress', 'merge:parseProgress'] + const validChannels = ['show-message', 'chat:importProgress', 'merge:parseProgress', 'llm:streamChunk'] if (validChannels.includes(channel)) { // Deliberately strip event as it includes `sender` ipcRenderer.on(channel, (_event, ...args) => func(...args)) @@ -332,6 +332,265 @@ const mergeApi = { }, } +// AI API - AI 功能 +interface SearchMessageResult { + id: number + senderName: string + senderPlatformId: string + content: string + timestamp: number + type: number +} + +interface AIConversation { + id: string + sessionId: string + title: string | null + createdAt: number + updatedAt: number +} + +interface AIMessage { + id: string + conversationId: string + role: 'user' | 'assistant' + content: string + timestamp: number + dataKeywords?: string[] + dataMessageCount?: number +} + +const aiApi = { + /** + * 搜索消息(关键词搜索) + */ + searchMessages: ( + sessionId: string, + keywords: string[], + filter?: { 
startTs?: number; endTs?: number }, + limit?: number, + offset?: number + ): Promise<{ messages: SearchMessageResult[]; total: number }> => { + return ipcRenderer.invoke('ai:searchMessages', sessionId, keywords, filter, limit, offset) + }, + + /** + * 获取消息上下文 + */ + getMessageContext: (sessionId: string, messageId: number, contextSize?: number): Promise => { + return ipcRenderer.invoke('ai:getMessageContext', sessionId, messageId, contextSize) + }, + + /** + * 创建 AI 对话 + */ + createConversation: (sessionId: string, title?: string): Promise => { + return ipcRenderer.invoke('ai:createConversation', sessionId, title) + }, + + /** + * 获取会话的所有 AI 对话列表 + */ + getConversations: (sessionId: string): Promise => { + return ipcRenderer.invoke('ai:getConversations', sessionId) + }, + + /** + * 获取单个 AI 对话 + */ + getConversation: (conversationId: string): Promise => { + return ipcRenderer.invoke('ai:getConversation', conversationId) + }, + + /** + * 更新 AI 对话标题 + */ + updateConversationTitle: (conversationId: string, title: string): Promise => { + return ipcRenderer.invoke('ai:updateConversationTitle', conversationId, title) + }, + + /** + * 删除 AI 对话 + */ + deleteConversation: (conversationId: string): Promise => { + return ipcRenderer.invoke('ai:deleteConversation', conversationId) + }, + + /** + * 添加 AI 消息 + */ + addMessage: ( + conversationId: string, + role: 'user' | 'assistant', + content: string, + dataKeywords?: string[], + dataMessageCount?: number + ): Promise => { + return ipcRenderer.invoke('ai:addMessage', conversationId, role, content, dataKeywords, dataMessageCount) + }, + + /** + * 获取 AI 对话的所有消息 + */ + getMessages: (conversationId: string): Promise => { + return ipcRenderer.invoke('ai:getMessages', conversationId) + }, + + /** + * 删除 AI 消息 + */ + deleteMessage: (messageId: string): Promise => { + return ipcRenderer.invoke('ai:deleteMessage', messageId) + }, +} + +// LLM API - LLM 服务功能 +interface LLMProvider { + id: string + name: string + description: string + defaultBaseUrl: string + models: Array<{ id: string; name: string; description?: string }> +} + +interface LLMConfig { + provider: string + apiKey: string + apiKeySet: boolean + model?: string + maxTokens?: number +} + +interface ChatMessage { + role: 'system' | 'user' | 'assistant' + content: string +} + +interface ChatOptions { + temperature?: number + maxTokens?: number +} + +interface ChatStreamChunk { + content: string + isFinished: boolean + finishReason?: 'stop' | 'length' | 'error' +} + +const llmApi = { + /** + * 获取所有支持的 LLM 提供商 + */ + getProviders: (): Promise => { + return ipcRenderer.invoke('llm:getProviders') + }, + + /** + * 获取当前 LLM 配置 + */ + getConfig: (): Promise => { + return ipcRenderer.invoke('llm:getConfig') + }, + + /** + * 保存 LLM 配置 + */ + saveConfig: (config: { + provider: string + apiKey: string + model?: string + maxTokens?: number + }): Promise<{ success: boolean; error?: string }> => { + return ipcRenderer.invoke('llm:saveConfig', config) + }, + + /** + * 删除 LLM 配置 + */ + deleteConfig: (): Promise => { + return ipcRenderer.invoke('llm:deleteConfig') + }, + + /** + * 验证 API Key + */ + validateApiKey: (provider: string, apiKey: string): Promise => { + return ipcRenderer.invoke('llm:validateApiKey', provider, apiKey) + }, + + /** + * 检查是否已配置 LLM + */ + hasConfig: (): Promise => { + return ipcRenderer.invoke('llm:hasConfig') + }, + + /** + * 发送 LLM 聊天请求(非流式) + */ + chat: (messages: ChatMessage[], options?: ChatOptions): Promise<{ success: boolean; content?: string; error?: string }> => { + return 
ipcRenderer.invoke('llm:chat', messages, options) + }, + + /** + * 发送 LLM 聊天请求(流式) + * 返回一个 Promise,该 Promise 在流完成后才 resolve + */ + chatStream: ( + messages: ChatMessage[], + options?: ChatOptions, + onChunk?: (chunk: ChatStreamChunk) => void + ): Promise<{ success: boolean; error?: string }> => { + return new Promise((resolve) => { + const requestId = `llm_${Date.now()}_${Math.random().toString(36).slice(2, 8)}` + console.log('[preload] chatStream 开始,requestId:', requestId) + + const handler = ( + _event: Electron.IpcRendererEvent, + data: { requestId: string; chunk: ChatStreamChunk; error?: string } + ) => { + if (data.requestId === requestId) { + if (data.error) { + console.log('[preload] chatStream 收到错误:', data.error) + if (onChunk) { + onChunk({ content: '', isFinished: true, finishReason: 'error' }) + } + ipcRenderer.removeListener('llm:streamChunk', handler) + resolve({ success: false, error: data.error }) + } else { + if (onChunk) { + onChunk(data.chunk) + } + + // 如果已完成,移除监听器并 resolve + if (data.chunk.isFinished) { + console.log('[preload] chatStream 完成,requestId:', requestId) + ipcRenderer.removeListener('llm:streamChunk', handler) + resolve({ success: true }) + } + } + } + } + + ipcRenderer.on('llm:streamChunk', handler) + + // 发起请求 + ipcRenderer.invoke('llm:chatStream', requestId, messages, options).then((result) => { + console.log('[preload] chatStream invoke 返回:', result) + if (!result.success) { + ipcRenderer.removeListener('llm:streamChunk', handler) + resolve(result) + } + // 如果 success,等待流完成(由 handler 处理 resolve) + }).catch((error) => { + console.error('[preload] chatStream invoke 错误:', error) + ipcRenderer.removeListener('llm:streamChunk', handler) + resolve({ success: false, error: String(error) }) + }) + }) + }, +} + // 扩展 api,添加 dialog 功能 const extendedApi = { ...api, @@ -351,6 +610,8 @@ if (process.contextIsolated) { contextBridge.exposeInMainWorld('api', extendedApi) contextBridge.exposeInMainWorld('chatApi', chatApi) contextBridge.exposeInMainWorld('mergeApi', mergeApi) + contextBridge.exposeInMainWorld('aiApi', aiApi) + contextBridge.exposeInMainWorld('llmApi', llmApi) } catch (error) { console.error(error) } @@ -363,4 +624,8 @@ if (process.contextIsolated) { window.chatApi = chatApi // @ts-ignore (define in dts) window.mergeApi = mergeApi + // @ts-ignore (define in dts) + window.aiApi = aiApi + // @ts-ignore (define in dts) + window.llmApi = llmApi } diff --git a/package.json b/package.json index 2490b20..8aee751 100644 --- a/package.json +++ b/package.json @@ -23,8 +23,10 @@ "dependencies": { "@electron-toolkit/preload": "^3.0.1", "@electron-toolkit/utils": "^4.0.0", + "@types/markdown-it": "^14.1.2", "better-sqlite3": "^12.4.6", "electron-updater": "^6.6.2", + "markdown-it": "^14.1.0", "stream-json": "^1.9.1" }, "devDependencies": { diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 81430f5..3044748 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -14,12 +14,18 @@ importers: '@electron-toolkit/utils': specifier: ^4.0.0 version: 4.0.0(electron@35.7.5) + '@types/markdown-it': + specifier: ^14.1.2 + version: 14.1.2 better-sqlite3: specifier: ^12.4.6 version: 12.4.6 electron-updater: specifier: ^6.6.2 version: 6.6.2 + markdown-it: + specifier: ^14.1.0 + version: 14.1.0 stream-json: specifier: ^1.9.1 version: 1.9.1 @@ -967,6 +973,15 @@ packages: '@types/keyv@3.1.4': resolution: {integrity: sha512-BQ5aZNSCpj7D6K2ksrRCTmKRLEpnPvWDiLPfoGyhZ++8YtiK9d/3DBKPJgry359X/P1PfruyYwvnvwFjuEiEIg==} + '@types/linkify-it@5.0.0': + resolution: {integrity: 
sha512-sVDA58zAw4eWAffKOaQH5/5j3XeayukzDk+ewSsnv3p4yJEZHCCzMDiZM8e0OUrRvmpGZ85jf4yDHkHsgBNr9Q==} + + '@types/markdown-it@14.1.2': + resolution: {integrity: sha512-promo4eFwuiW+TfGxhi+0x3czqTYJkG8qB17ZUJiVF10Xm7NLVRSLUsfRTU/6h1e24VvRnXCx+hG7li58lkzog==} + + '@types/mdurl@2.0.0': + resolution: {integrity: sha512-RGdgjQUZba5p6QEFAVx2OGb8rQDL/cPRG7GiedRzMcJ1tYnUANBncjbSB1NRGwbvjcPeikRABz2nshyPk1bhWg==} + '@types/ms@2.1.0': resolution: {integrity: sha512-GsCCIZDE/p3i96vtEqx+7dBUGXrc7zeSK3wwPHIaRThS+9OhWIXRqzs4d6k1SVU8g91DrNRWxWUGhp5KXQb2VA==} @@ -2454,6 +2469,9 @@ packages: resolution: {integrity: sha512-utfs7Pr5uJyyvDETitgsaqSyjCb2qNRAtuqUeWIAKztsOYdcACf2KtARYXg2pSvhkt+9NfoaNY7fxjl6nuMjIQ==} engines: {node: '>= 12.0.0'} + linkify-it@5.0.0: + resolution: {integrity: sha512-5aHCbzQRADcdP+ATqnDuhhJ/MRIqDkZX5pyjFHRRysS8vZ5AbqGEoFIb6pYHPZ+L/OC2Lc+xT8uHVVR5CAK/wQ==} + local-pkg@1.1.2: resolution: {integrity: sha512-arhlxbFRmoQHl33a0Zkle/YWlmNwoyt6QNZEIJcqNbdrsix5Lvc4HyyI3EnwxTYlZYc32EbYrQ8SzEZ7dqgg9A==} engines: {node: '>=14'} @@ -2511,6 +2529,10 @@ packages: resolution: {integrity: sha512-QMjGbFTP0blj97EeidG5hk/QhKQ3T4ICckQGLgz38QF7Vgbk6e6FTARN8KhKxyBbWn8R0HU+bnw8aSoFPD4qtQ==} engines: {node: ^18.17.0 || >=20.5.0} + markdown-it@14.1.0: + resolution: {integrity: sha512-a54IwgWPaeBCAAsv13YgmALOF1elABB08FxO9i+r4VFk5Vl4pKokRPeX8u5TCgSsPi6ec1otfLjdOpVcgbpshg==} + hasBin: true + matcher@3.0.0: resolution: {integrity: sha512-OkeDaAZ/bQCxeFAozM55PKcKU0yJMPGifLwV4Qgjitu+5MoAfSQN4lsLJeXZ1b8w0x+/Emda6MZgXS1jvsapng==} engines: {node: '>=10'} @@ -2522,6 +2544,9 @@ packages: mdn-data@2.12.2: resolution: {integrity: sha512-IEn+pegP1aManZuckezWCO+XZQDplx1366JoVhTpMpBB1sPey/SbveZQUosKiKiGYjg1wH4pMlNgXbCiYgihQA==} + mdurl@2.0.0: + resolution: {integrity: sha512-Lf+9+2r+Tdp5wXDXC4PcIBjTDtq4UKjCPMQhKIuzpJNW0b96kVqSwW0bT7FhRSfmAiFYgP+SCRvdrDozfh0U5w==} + merge-stream@2.0.0: resolution: {integrity: sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==} @@ -2952,6 +2977,10 @@ packages: pump@3.0.3: resolution: {integrity: sha512-todwxLMY7/heScKmntwQG8CXVkWUOdYxIvY2s0VWAAMh/nd8SoYiRaKjlr7+iCs984f2P8zvrfWcDDYVb73NfA==} + punycode.js@2.3.1: + resolution: {integrity: sha512-uxFIHU0YlHYhDQtV4R9J6a52SLx28BCjT+4ieh7IGbgwVJWO+km431c4yRlREUAsAmt/uMjQUyQHNEPf0M39CA==} + engines: {node: '>=6'} + punycode@2.3.1: resolution: {integrity: sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==} engines: {node: '>=6'} @@ -3341,6 +3370,9 @@ packages: engines: {node: '>=14.17'} hasBin: true + uc.micro@2.1.0: + resolution: {integrity: sha512-ARDJmphmdvUk6Glw7y9DQ2bFkKBHwQHLi2lsaH6PPmz/Ka9sFOBsBluozhDltWmnv9u/cF6Rt87znRTPV+yp/A==} + ufo@1.6.1: resolution: {integrity: sha512-9a4/uxlTWJ4+a5i0ooc1rU7C7YOw3wT+UGqdeNNHWnOF9qcMBgLRS+4IYUqbczewFx4mLEig6gawh7X6mFlEkA==} @@ -4684,6 +4716,15 @@ snapshots: dependencies: '@types/node': 24.10.1 + '@types/linkify-it@5.0.0': {} + + '@types/markdown-it@14.1.2': + dependencies: + '@types/linkify-it': 5.0.0 + '@types/mdurl': 2.0.0 + + '@types/mdurl@2.0.0': {} + '@types/ms@2.1.0': {} '@types/node@22.19.1': @@ -6404,6 +6445,10 @@ snapshots: lightningcss-win32-arm64-msvc: 1.30.2 lightningcss-win32-x64-msvc: 1.30.2 + linkify-it@5.0.0: + dependencies: + uc.micro: 2.1.0 + local-pkg@1.1.2: dependencies: mlly: 1.8.0 @@ -6493,6 +6538,15 @@ snapshots: transitivePeerDependencies: - supports-color + markdown-it@14.1.0: + dependencies: + argparse: 2.0.1 + entities: 4.5.0 + linkify-it: 5.0.0 + mdurl: 2.0.0 + punycode.js: 2.3.1 + uc.micro: 
2.1.0 + matcher@3.0.0: dependencies: escape-string-regexp: 4.0.0 @@ -6502,6 +6556,8 @@ snapshots: mdn-data@2.12.2: {} + mdurl@2.0.0: {} + merge-stream@2.0.0: {} merge2@1.4.1: {} @@ -6905,6 +6961,8 @@ snapshots: end-of-stream: 1.4.5 once: 1.4.0 + punycode.js@2.3.1: {} + punycode@2.3.1: {} quansync@0.2.11: {} @@ -7315,6 +7373,8 @@ snapshots: typescript@5.9.3: {} + uc.micro@2.1.0: {} + ufo@1.6.1: {} uncrypto@0.1.3: {} diff --git a/src/components.d.ts b/src/components.d.ts index 9491a23..deb6e5e 100644 --- a/src/components.d.ts +++ b/src/components.d.ts @@ -22,6 +22,7 @@ declare module 'vue' { UModal: typeof import('./../node_modules/.pnpm/@nuxt+ui@4.2.1_@babel+parser@7.28.5_axios@1.13.2_embla-carousel@8.6.0_typescript@5.9.3__1572391ae10a8169a5c9784ec5cec455/node_modules/@nuxt/ui/dist/runtime/components/Modal.vue')['default'] UPopover: typeof import('./../node_modules/.pnpm/@nuxt+ui@4.2.1_@babel+parser@7.28.5_axios@1.13.2_embla-carousel@8.6.0_typescript@5.9.3__1572391ae10a8169a5c9784ec5cec455/node_modules/@nuxt/ui/dist/runtime/components/Popover.vue')['default'] UProgress: typeof import('./../node_modules/.pnpm/@nuxt+ui@4.2.1_@babel+parser@7.28.5_axios@1.13.2_embla-carousel@8.6.0_typescript@5.9.3__1572391ae10a8169a5c9784ec5cec455/node_modules/@nuxt/ui/dist/runtime/components/Progress.vue')['default'] + USelect: typeof import('./../node_modules/.pnpm/@nuxt+ui@4.2.1_@babel+parser@7.28.5_axios@1.13.2_embla-carousel@8.6.0_typescript@5.9.3__1572391ae10a8169a5c9784ec5cec455/node_modules/@nuxt/ui/dist/runtime/components/Select.vue')['default'] USwitch: typeof import('./../node_modules/.pnpm/@nuxt+ui@4.2.1_@babel+parser@7.28.5_axios@1.13.2_embla-carousel@8.6.0_typescript@5.9.3__1572391ae10a8169a5c9784ec5cec455/node_modules/@nuxt/ui/dist/runtime/components/Switch.vue')['default'] UTabs: typeof import('./../node_modules/.pnpm/@nuxt+ui@4.2.1_@babel+parser@7.28.5_axios@1.13.2_embla-carousel@8.6.0_typescript@5.9.3__1572391ae10a8169a5c9784ec5cec455/node_modules/@nuxt/ui/dist/runtime/components/Tabs.vue')['default'] UTooltip: typeof import('./../node_modules/.pnpm/@nuxt+ui@4.2.1_@babel+parser@7.28.5_axios@1.13.2_embla-carousel@8.6.0_typescript@5.9.3__1572391ae10a8169a5c9784ec5cec455/node_modules/@nuxt/ui/dist/runtime/components/Tooltip.vue')['default'] diff --git a/src/components/analysis/AITab.vue b/src/components/analysis/AITab.vue new file mode 100644 index 0000000..1d76794 --- /dev/null +++ b/src/components/analysis/AITab.vue @@ -0,0 +1,94 @@ + + + + + diff --git a/src/components/analysis/RankingTab.vue b/src/components/analysis/RankingTab.vue index aa1608a..4f2f15a 100644 --- a/src/components/analysis/RankingTab.vue +++ b/src/components/analysis/RankingTab.vue @@ -82,7 +82,7 @@ const memberRankData = computed(() => { > 🏆 {{ seasonTitle }} -

各榜单第一名请@群主领取奖品 🎁
+ 各榜单前三名请 @群主 领取奖品 🎁
diff --git a/src/components/analysis/ai/AIConfigModal.vue b/src/components/analysis/ai/AIConfigModal.vue new file mode 100644 index 0000000..05cc5d5 --- /dev/null +++ b/src/components/analysis/ai/AIConfigModal.vue @@ -0,0 +1,340 @@ + + + + diff --git a/src/components/analysis/ai/ChatExplorer.vue b/src/components/analysis/ai/ChatExplorer.vue new file mode 100644 index 0000000..92d1cee --- /dev/null +++ b/src/components/analysis/ai/ChatExplorer.vue @@ -0,0 +1,262 @@ + + + diff --git a/src/components/analysis/ai/ChatInput.vue b/src/components/analysis/ai/ChatInput.vue new file mode 100644 index 0000000..48bc2f8 --- /dev/null +++ b/src/components/analysis/ai/ChatInput.vue @@ -0,0 +1,100 @@ + + +