feat: 支持AI分析

This commit is contained in:
digua
2025-12-03 00:42:07 +08:00
parent 3d7cadc123
commit 5a2bab52be
29 changed files with 4022 additions and 4 deletions

View File

@@ -0,0 +1,294 @@
/**
* AI 对话历史管理模块
* 在主进程中执行,管理 AI 对话的持久化存储
*/
import Database from 'better-sqlite3'
import * as fs from 'fs'
import * as path from 'path'
import { app } from 'electron'
// Lazily-resolved AI storage directory and the cached DB handle.
let AI_DB_DIR: string | null = null
let AI_DB: Database.Database | null = null

/**
 * Resolve (and cache) the directory that holds the AI database.
 * Falls back to ./ai under the working directory when the Electron
 * `app` API is unavailable.
 */
function getAiDbDir(): string {
  if (!AI_DB_DIR) {
    try {
      AI_DB_DIR = path.join(app.getPath('documents'), 'ChatLab', 'ai')
    } catch {
      AI_DB_DIR = path.join(process.cwd(), 'ai')
    }
  }
  return AI_DB_DIR
}
/**
 * Create the AI database directory on disk if it does not exist yet.
 */
function ensureAiDbDir(): void {
  const target = getAiDbDir()
  if (fs.existsSync(target)) {
    return
  }
  fs.mkdirSync(target, { recursive: true })
}
/**
 * Get the singleton AI database handle, creating the file and schema on
 * first use. WAL journaling is enabled for better read concurrency.
 *
 * NOTE(review): SQLite does not enforce foreign keys unless
 * `PRAGMA foreign_keys = ON` is set per connection, so the
 * ON DELETE CASCADE declared below will not fire on its own;
 * deleteConversation() deletes child rows manually, which is why deletion
 * still works. Confirm before relying on the cascade elsewhere.
 */
function getAiDb(): Database.Database {
if (AI_DB) return AI_DB
ensureAiDbDir()
const dbPath = path.join(getAiDbDir(), 'conversations.db')
AI_DB = new Database(dbPath)
AI_DB.pragma('journal_mode = WAL')
// Create the schema (all statements are idempotent).
AI_DB.exec(`
-- AI 对话表
CREATE TABLE IF NOT EXISTS ai_conversation (
id TEXT PRIMARY KEY,
session_id TEXT NOT NULL,
title TEXT,
created_at INTEGER NOT NULL,
updated_at INTEGER NOT NULL
);
-- AI 消息表
CREATE TABLE IF NOT EXISTS ai_message (
id TEXT PRIMARY KEY,
conversation_id TEXT NOT NULL,
role TEXT NOT NULL,
content TEXT NOT NULL,
timestamp INTEGER NOT NULL,
data_keywords TEXT,
data_message_count INTEGER,
FOREIGN KEY(conversation_id) REFERENCES ai_conversation(id) ON DELETE CASCADE
);
-- 索引
CREATE INDEX IF NOT EXISTS idx_ai_conversation_session ON ai_conversation(session_id);
CREATE INDEX IF NOT EXISTS idx_ai_message_conversation ON ai_message(conversation_id);
`)
return AI_DB
}
/**
 * Close the AI database connection (if open) and drop the cached handle.
 */
export function closeAiDatabase(): void {
  const db = AI_DB
  if (db === null) return
  db.close()
  AI_DB = null
}
// ==================== Type definitions ====================
/**
 * A persisted AI conversation (one analysis thread within a chat session).
 */
export interface AIConversation {
id: string
// Chat session this conversation belongs to.
sessionId: string
// User-visible title; null until one is set.
title: string | null
// Unix timestamps in seconds.
createdAt: number
updatedAt: number
}
/**
 * One message inside an AI conversation.
 */
export interface AIMessage {
id: string
conversationId: string
role: 'user' | 'assistant'
content: string
// Unix timestamp in seconds.
timestamp: number
// Optional keyword list; persisted as a JSON string in data_keywords.
dataKeywords?: string[]
// Optional count stored in data_message_count.
dataMessageCount?: number
}
// ==================== Conversation management ====================

/**
 * Create and persist a new conversation for a session.
 * @param sessionId Owning chat session id.
 * @param title Optional initial title.
 * @returns The stored conversation record.
 */
export function createConversation(sessionId: string, title?: string): AIConversation {
  const db = getAiDb()
  const nowSec = Math.floor(Date.now() / 1000)
  // Id combines a millisecond timestamp with a short random suffix.
  const convId = `conv_${Date.now()}_${Math.random().toString(36).slice(2, 8)}`
  const insert = db.prepare(`
INSERT INTO ai_conversation (id, session_id, title, created_at, updated_at)
VALUES (?, ?, ?, ?, ?)
`)
  insert.run(convId, sessionId, title || null, nowSec, nowSec)
  const record: AIConversation = {
    id: convId,
    sessionId,
    title: title || null,
    createdAt: nowSec,
    updatedAt: nowSec,
  }
  return record
}
/**
 * List every conversation of a session, most recently updated first.
 */
export function getConversations(sessionId: string): AIConversation[] {
  const stmt = getAiDb().prepare(`
SELECT id, session_id as sessionId, title, created_at as createdAt, updated_at as updatedAt
FROM ai_conversation
WHERE session_id = ?
ORDER BY updated_at DESC
`)
  return stmt.all(sessionId) as AIConversation[]
}
/**
 * Load a single conversation by id.
 * @returns The conversation, or null when no row matches.
 */
export function getConversation(conversationId: string): AIConversation | null {
  const stmt = getAiDb().prepare(`
SELECT id, session_id as sessionId, title, created_at as createdAt, updated_at as updatedAt
FROM ai_conversation
WHERE id = ?
`)
  const found = stmt.get(conversationId) as AIConversation | undefined
  return found ?? null
}
/**
 * Rename a conversation and bump its updated_at timestamp.
 * @returns true when a row was actually updated.
 */
export function updateConversationTitle(conversationId: string, title: string): boolean {
  const nowSec = Math.floor(Date.now() / 1000)
  const info = getAiDb().prepare(`
UPDATE ai_conversation
SET title = ?, updated_at = ?
WHERE id = ?
`).run(title, nowSec, conversationId)
  return info.changes > 0
}
/**
 * Delete a conversation together with all of its messages.
 * Child rows are removed explicitly rather than relying on the declared
 * ON DELETE CASCADE, which SQLite only enforces when the foreign_keys
 * pragma is enabled.
 * @returns true when the conversation row existed and was deleted.
 */
export function deleteConversation(conversationId: string): boolean {
  const db = getAiDb()
  // Messages first ...
  db.prepare('DELETE FROM ai_message WHERE conversation_id = ?').run(conversationId)
  // ... then the conversation itself.
  const info = db.prepare('DELETE FROM ai_conversation WHERE id = ?').run(conversationId)
  return info.changes > 0
}
// ==================== Message management ====================

/**
 * Append a message to a conversation and touch the conversation's
 * updated_at so it sorts to the top of the conversation list.
 * @param conversationId Target conversation.
 * @param role Author of the message.
 * @param content Message text.
 * @param dataKeywords Optional keywords, stored as a JSON string.
 * @param dataMessageCount Optional count stored alongside the message.
 * @returns The stored message record.
 */
export function addMessage(
  conversationId: string,
  role: 'user' | 'assistant',
  content: string,
  dataKeywords?: string[],
  dataMessageCount?: number
): AIMessage {
  const db = getAiDb()
  const nowSec = Math.floor(Date.now() / 1000)
  const msgId = `msg_${Date.now()}_${Math.random().toString(36).slice(2, 8)}`
  const serializedKeywords = dataKeywords ? JSON.stringify(dataKeywords) : null
  db.prepare(`
INSERT INTO ai_message (id, conversation_id, role, content, timestamp, data_keywords, data_message_count)
VALUES (?, ?, ?, ?, ?, ?, ?)
`).run(msgId, conversationId, role, content, nowSec, serializedKeywords, dataMessageCount ?? null)
  // Keep the conversation's recency in sync with its newest message.
  db.prepare('UPDATE ai_conversation SET updated_at = ? WHERE id = ?').run(nowSec, conversationId)
  return {
    id: msgId,
    conversationId,
    role,
    content,
    timestamp: nowSec,
    dataKeywords,
    dataMessageCount,
  }
}
/**
 * Load all messages of a conversation in chronological order.
 *
 * data_keywords is stored as a JSON string; previously a single corrupted
 * value made JSON.parse throw and broke loading of the entire
 * conversation, so the parse is now guarded per row.
 */
export function getMessages(conversationId: string): AIMessage[] {
  const db = getAiDb()
  const rows = db.prepare(`
SELECT
id,
conversation_id as conversationId,
role,
content,
timestamp,
data_keywords as dataKeywords,
data_message_count as dataMessageCount
FROM ai_message
WHERE conversation_id = ?
ORDER BY timestamp ASC
`).all(conversationId) as Array<{
    id: string
    conversationId: string
    role: string
    content: string
    timestamp: number
    dataKeywords: string | null
    dataMessageCount: number | null
  }>
  return rows.map((row) => {
    // Defensive parse: never let one bad row take down the whole query.
    let keywords: string[] | undefined
    if (row.dataKeywords) {
      try {
        keywords = JSON.parse(row.dataKeywords)
      } catch {
        keywords = undefined
      }
    }
    return {
      id: row.id,
      conversationId: row.conversationId,
      role: row.role as 'user' | 'assistant',
      content: row.content,
      timestamp: row.timestamp,
      dataKeywords: keywords,
      dataMessageCount: row.dataMessageCount ?? undefined,
    }
  })
}
/**
 * Delete a single AI message by id.
 * @returns true when a row was removed.
 */
export function deleteMessage(messageId: string): boolean {
  const info = getAiDb().prepare('DELETE FROM ai_message WHERE id = ?').run(messageId)
  return info.changes > 0
}

View File

@@ -0,0 +1,184 @@
/**
* DeepSeek LLM Provider
* 使用 OpenAI 兼容的 API 格式
*/
import type {
ILLMService,
LLMProvider,
ChatMessage,
ChatOptions,
ChatResponse,
ChatStreamChunk,
ProviderInfo,
} from './types'
const DEFAULT_BASE_URL = 'https://api.deepseek.com'

// Models exposed to the UI (descriptions are user-facing).
const MODELS = [
  { id: 'deepseek-chat', name: 'DeepSeek Chat', description: '通用对话模型' },
  { id: 'deepseek-coder', name: 'DeepSeek Coder', description: '代码生成模型' },
]

export const DEEPSEEK_INFO: ProviderInfo = {
  id: 'deepseek',
  name: 'DeepSeek',
  description: 'DeepSeek AI 大语言模型',
  defaultBaseUrl: DEFAULT_BASE_URL,
  models: MODELS,
}

/**
 * DeepSeek chat-completion service using the OpenAI-compatible HTTP API.
 */
export class DeepSeekService implements ILLMService {
  private apiKey: string
  private baseUrl: string
  private model: string

  constructor(apiKey: string, model?: string, baseUrl?: string) {
    this.apiKey = apiKey
    this.baseUrl = baseUrl || DEFAULT_BASE_URL
    this.model = model || 'deepseek-chat'
  }

  getProvider(): LLMProvider {
    return 'deepseek'
  }

  getModels(): string[] {
    return MODELS.map((m) => m.id)
  }

  getDefaultModel(): string {
    return 'deepseek-chat'
  }

  /** Map a raw API finish_reason onto the internal union type. */
  private normalizeFinishReason(reason: unknown): 'stop' | 'length' | 'error' {
    if (reason === 'stop') return 'stop'
    if (reason === 'length') return 'length'
    return 'error'
  }

  /** Build the fetch options shared by streaming and non-streaming calls. */
  private buildRequest(messages: ChatMessage[], options: ChatOptions | undefined, stream: boolean) {
    return {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        Authorization: `Bearer ${this.apiKey}`,
      },
      body: JSON.stringify({
        model: this.model,
        messages: messages.map((m) => ({ role: m.role, content: m.content })),
        temperature: options?.temperature ?? 0.7,
        max_tokens: options?.maxTokens ?? 2048,
        stream,
      }),
    }
  }

  /**
   * Non-streaming chat completion.
   * @throws Error when the HTTP response is not OK.
   */
  async chat(messages: ChatMessage[], options?: ChatOptions): Promise<ChatResponse> {
    const response = await fetch(`${this.baseUrl}/v1/chat/completions`, this.buildRequest(messages, options, false))
    if (!response.ok) {
      const error = await response.text()
      throw new Error(`DeepSeek API error: ${response.status} - ${error}`)
    }
    const data = await response.json()
    const choice = data.choices?.[0]
    return {
      content: choice?.message?.content || '',
      finishReason: this.normalizeFinishReason(choice?.finish_reason),
      usage: data.usage
        ? {
            promptTokens: data.usage.prompt_tokens,
            completionTokens: data.usage.completion_tokens,
            totalTokens: data.usage.total_tokens,
          }
        : undefined,
    }
  }

  /**
   * Streaming chat completion over SSE ("data: ..." lines).
   * Always yields a terminal chunk (isFinished=true), even when the
   * connection closes without a [DONE] sentinel.
   */
  async *chatStream(messages: ChatMessage[], options?: ChatOptions): AsyncGenerator<ChatStreamChunk> {
    const response = await fetch(`${this.baseUrl}/v1/chat/completions`, this.buildRequest(messages, options, true))
    if (!response.ok) {
      const error = await response.text()
      throw new Error(`DeepSeek API error: ${response.status} - ${error}`)
    }
    const reader = response.body?.getReader()
    if (!reader) {
      throw new Error('Failed to get response reader')
    }
    const decoder = new TextDecoder()
    let buffer = ''
    try {
      while (true) {
        const { done, value } = await reader.read()
        if (done) break
        buffer += decoder.decode(value, { stream: true })
        // Keep the trailing partial line in the buffer for the next read.
        const lines = buffer.split('\n')
        buffer = lines.pop() || ''
        for (const line of lines) {
          const trimmed = line.trim()
          if (!trimmed || !trimmed.startsWith('data: ')) continue
          const data = trimmed.slice(6)
          if (data === '[DONE]') {
            yield { content: '', isFinished: true, finishReason: 'stop' }
            return
          }
          try {
            const parsed = JSON.parse(data)
            const delta = parsed.choices?.[0]?.delta
            const finishReason = parsed.choices?.[0]?.finish_reason
            if (delta?.content) {
              yield { content: delta.content, isFinished: false }
            }
            if (finishReason) {
              yield { content: '', isFinished: true, finishReason: this.normalizeFinishReason(finishReason) }
              return
            }
          } catch {
            // Malformed SSE line — skip it and keep reading.
          }
        }
      }
      // BUG FIX: if the stream ends without [DONE] or a finish_reason,
      // still emit a terminal chunk so consumers waiting on isFinished
      // do not hang forever.
      yield { content: '', isFinished: true, finishReason: 'stop' }
    } finally {
      reader.releaseLock()
    }
  }

  /**
   * Validate the API key with a lightweight GET /v1/models request.
   * @returns false on any HTTP or network failure.
   */
  async validateApiKey(): Promise<boolean> {
    try {
      const response = await fetch(`${this.baseUrl}/v1/models`, {
        method: 'GET',
        headers: {
          Authorization: `Bearer ${this.apiKey}`,
        },
      })
      return response.ok
    } catch {
      return false
    }
  }
}

View File

@@ -0,0 +1,220 @@
/**
* LLM 服务模块入口
* 提供统一的 LLM 服务管理
*/
import * as fs from 'fs'
import * as path from 'path'
import { app } from 'electron'
import type { LLMConfig, LLMProvider, ILLMService, ProviderInfo, ChatMessage, ChatOptions, ChatStreamChunk } from './types'
import { DeepSeekService, DEEPSEEK_INFO } from './deepseek'
import { QwenService, QWEN_INFO } from './qwen'
import { aiLogger } from '../logger'
// 导出类型
export * from './types'
// All supported provider descriptors (order = display order).
export const PROVIDERS: ProviderInfo[] = [DEEPSEEK_INFO, QWEN_INFO]
// Cached absolute path of the LLM config file.
let CONFIG_PATH: string | null = null

/**
 * Resolve (and cache) the path of the LLM config JSON file. Falls back
 * to the working directory when the Electron app API is unavailable.
 */
function getConfigPath(): string {
  if (!CONFIG_PATH) {
    try {
      CONFIG_PATH = path.join(app.getPath('documents'), 'ChatLab', 'ai', 'llm-config.json')
    } catch {
      CONFIG_PATH = path.join(process.cwd(), 'ai', 'llm-config.json')
    }
  }
  return CONFIG_PATH
}
/**
 * Shape of the LLM configuration persisted to disk.
 * NOTE(review): the API key is stored in plaintext JSON in the user's
 * documents folder — consider OS keychain storage.
 */
export interface StoredConfig {
provider: LLMProvider
apiKey: string
model?: string
maxTokens?: number
}
/**
* 保存 LLM 配置
*/
export function saveLLMConfig(config: StoredConfig): void {
const configPath = getConfigPath()
const dir = path.dirname(configPath)
if (!fs.existsSync(dir)) {
fs.mkdirSync(dir, { recursive: true })
}
fs.writeFileSync(configPath, JSON.stringify(config, null, 2), 'utf-8')
}
/**
 * Read the persisted LLM configuration.
 * @returns The parsed config, or null when missing or unreadable.
 */
export function loadLLMConfig(): StoredConfig | null {
  const configPath = getConfigPath()
  if (!fs.existsSync(configPath)) return null
  try {
    const raw = fs.readFileSync(configPath, 'utf-8')
    return JSON.parse(raw) as StoredConfig
  } catch {
    // Corrupted or unreadable file — treat as "not configured".
    return null
  }
}
/**
 * Remove the persisted LLM configuration file, if present.
 */
export function deleteLLMConfig(): void {
  const configPath = getConfigPath()
  if (!fs.existsSync(configPath)) return
  fs.unlinkSync(configPath)
}
/**
 * Whether a usable LLM configuration (with a non-empty API key) exists.
 */
export function hasLLMConfig(): boolean {
  const config = loadLLMConfig()
  return Boolean(config && config.apiKey)
}
/**
 * Instantiate the concrete service implementation for a provider.
 * @throws Error for unrecognized providers.
 */
export function createLLMService(config: LLMConfig): ILLMService {
  const { provider, apiKey, model, baseUrl } = config
  if (provider === 'deepseek') {
    return new DeepSeekService(apiKey, model, baseUrl)
  }
  if (provider === 'qwen') {
    return new QwenService(apiKey, model, baseUrl)
  }
  throw new Error(`Unknown LLM provider: ${provider}`)
}
/**
 * Build a service instance from the saved configuration.
 * @returns null when nothing (or no API key) is configured.
 */
export function getCurrentLLMService(): ILLMService | null {
  const config = loadLLMConfig()
  if (!config?.apiKey) {
    return null
  }
  const { provider, apiKey, model, maxTokens } = config
  return createLLMService({ provider, apiKey, model, maxTokens })
}
/**
 * Look up a provider's static descriptor, or null when unknown.
 */
export function getProviderInfo(provider: LLMProvider): ProviderInfo | null {
  const match = PROVIDERS.find((entry) => entry.id === provider)
  return match ?? null
}
/**
 * Check an API key against the provider's endpoint.
 */
export async function validateApiKey(provider: LLMProvider, apiKey: string): Promise<boolean> {
  return createLLMService({ provider, apiKey }).validateApiKey()
}
/**
 * Run a non-streaming chat request against the currently configured
 * provider, with request/response logging.
 * @throws Error when no LLM is configured or the provider call fails.
 */
export async function chat(messages: ChatMessage[], options?: ChatOptions): Promise<string> {
  aiLogger.info('LLM', '开始非流式聊天请求', {
    messagesCount: messages.length,
    firstMessageRole: messages[0]?.role,
    firstMessageLength: messages[0]?.content?.length,
    options,
  })
  const service = getCurrentLLMService()
  if (service === null) {
    aiLogger.error('LLM', '服务未配置')
    throw new Error('LLM 服务未配置,请先在设置中配置 API Key')
  }
  aiLogger.info('LLM', `使用提供商: ${service.getProvider()}`)
  try {
    const result = await service.chat(messages, options)
    aiLogger.info('LLM', '非流式请求成功', {
      contentLength: result.content?.length,
      finishReason: result.finishReason,
      usage: result.usage,
    })
    return result.content
  } catch (err) {
    aiLogger.error('LLM', '非流式请求失败', { error: String(err) })
    throw err
  }
}
/**
 * Run a streaming chat request against the currently configured provider,
 * re-yielding every chunk and logging start/completion/failure.
 * @throws Error when no LLM is configured or the provider call fails.
 */
export async function* chatStream(messages: ChatMessage[], options?: ChatOptions): AsyncGenerator<ChatStreamChunk> {
  aiLogger.info('LLM', '开始流式聊天请求', {
    messagesCount: messages.length,
    firstMessageRole: messages[0]?.role,
    firstMessageLength: messages[0]?.content?.length,
    options,
  })
  const service = getCurrentLLMService()
  if (service === null) {
    aiLogger.error('LLM', '服务未配置(流式)')
    throw new Error('LLM 服务未配置,请先在设置中配置 API Key')
  }
  aiLogger.info('LLM', `使用提供商(流式): ${service.getProvider()}`)
  let emitted = 0
  let aggregate = ''
  try {
    for await (const piece of service.chatStream(messages, options)) {
      emitted += 1
      aggregate += piece.content
      yield piece
      if (piece.isFinished) {
        aiLogger.info('LLM', '流式请求完成', {
          chunkCount: emitted,
          totalContentLength: aggregate.length,
          finishReason: piece.finishReason,
        })
      }
    }
  } catch (err) {
    aiLogger.error('LLM', '流式请求失败', {
      error: String(err),
      chunkCountBeforeError: emitted,
    })
    throw err
  }
}

View File

@@ -0,0 +1,185 @@
/**
* 通义千问 (Qwen) LLM Provider
* 使用阿里云 DashScope 兼容 OpenAI 格式的 API
*/
import type {
ILLMService,
LLMProvider,
ChatMessage,
ChatOptions,
ChatResponse,
ChatStreamChunk,
ProviderInfo,
} from './types'
const DEFAULT_BASE_URL = 'https://dashscope.aliyuncs.com/compatible-mode/v1'

// Models exposed to the UI (descriptions are user-facing).
const MODELS = [
  { id: 'qwen-turbo', name: 'Qwen Turbo', description: '通义千问超大规模语言模型,速度快' },
  { id: 'qwen-plus', name: 'Qwen Plus', description: '通义千问超大规模语言模型,效果好' },
  { id: 'qwen-max', name: 'Qwen Max', description: '通义千问千亿级别超大规模语言模型' },
]

export const QWEN_INFO: ProviderInfo = {
  id: 'qwen',
  name: '通义千问',
  description: '阿里云通义千问大语言模型',
  defaultBaseUrl: DEFAULT_BASE_URL,
  models: MODELS,
}

/**
 * Tongyi Qianwen (Qwen) service using DashScope's OpenAI-compatible API.
 */
export class QwenService implements ILLMService {
  private apiKey: string
  private baseUrl: string
  private model: string

  constructor(apiKey: string, model?: string, baseUrl?: string) {
    this.apiKey = apiKey
    this.baseUrl = baseUrl || DEFAULT_BASE_URL
    this.model = model || 'qwen-turbo'
  }

  getProvider(): LLMProvider {
    return 'qwen'
  }

  getModels(): string[] {
    return MODELS.map((m) => m.id)
  }

  getDefaultModel(): string {
    return 'qwen-turbo'
  }

  /** Map a raw API finish_reason onto the internal union type. */
  private normalizeFinishReason(reason: unknown): 'stop' | 'length' | 'error' {
    if (reason === 'stop') return 'stop'
    if (reason === 'length') return 'length'
    return 'error'
  }

  /** Build the fetch options shared by streaming and non-streaming calls. */
  private buildRequest(messages: ChatMessage[], options: ChatOptions | undefined, stream: boolean) {
    return {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        Authorization: `Bearer ${this.apiKey}`,
      },
      body: JSON.stringify({
        model: this.model,
        messages: messages.map((m) => ({ role: m.role, content: m.content })),
        temperature: options?.temperature ?? 0.7,
        max_tokens: options?.maxTokens ?? 2048,
        stream,
      }),
    }
  }

  /**
   * Non-streaming chat completion.
   * @throws Error when the HTTP response is not OK.
   */
  async chat(messages: ChatMessage[], options?: ChatOptions): Promise<ChatResponse> {
    const response = await fetch(`${this.baseUrl}/chat/completions`, this.buildRequest(messages, options, false))
    if (!response.ok) {
      const error = await response.text()
      throw new Error(`Qwen API error: ${response.status} - ${error}`)
    }
    const data = await response.json()
    const choice = data.choices?.[0]
    return {
      content: choice?.message?.content || '',
      finishReason: this.normalizeFinishReason(choice?.finish_reason),
      usage: data.usage
        ? {
            promptTokens: data.usage.prompt_tokens,
            completionTokens: data.usage.completion_tokens,
            totalTokens: data.usage.total_tokens,
          }
        : undefined,
    }
  }

  /**
   * Streaming chat completion over SSE ("data: ..." lines).
   * Always yields a terminal chunk (isFinished=true), even when the
   * connection closes without a [DONE] sentinel.
   */
  async *chatStream(messages: ChatMessage[], options?: ChatOptions): AsyncGenerator<ChatStreamChunk> {
    const response = await fetch(`${this.baseUrl}/chat/completions`, this.buildRequest(messages, options, true))
    if (!response.ok) {
      const error = await response.text()
      throw new Error(`Qwen API error: ${response.status} - ${error}`)
    }
    const reader = response.body?.getReader()
    if (!reader) {
      throw new Error('Failed to get response reader')
    }
    const decoder = new TextDecoder()
    let buffer = ''
    try {
      while (true) {
        const { done, value } = await reader.read()
        if (done) break
        buffer += decoder.decode(value, { stream: true })
        // Keep the trailing partial line in the buffer for the next read.
        const lines = buffer.split('\n')
        buffer = lines.pop() || ''
        for (const line of lines) {
          const trimmed = line.trim()
          if (!trimmed || !trimmed.startsWith('data: ')) continue
          const data = trimmed.slice(6)
          if (data === '[DONE]') {
            yield { content: '', isFinished: true, finishReason: 'stop' }
            return
          }
          try {
            const parsed = JSON.parse(data)
            const delta = parsed.choices?.[0]?.delta
            const finishReason = parsed.choices?.[0]?.finish_reason
            if (delta?.content) {
              yield { content: delta.content, isFinished: false }
            }
            if (finishReason) {
              yield { content: '', isFinished: true, finishReason: this.normalizeFinishReason(finishReason) }
              return
            }
          } catch {
            // Malformed SSE line — skip it and keep reading.
          }
        }
      }
      // BUG FIX: if the stream ends without [DONE] or a finish_reason,
      // still emit a terminal chunk so consumers waiting on isFinished
      // do not hang forever.
      yield { content: '', isFinished: true, finishReason: 'stop' }
    } finally {
      reader.releaseLock()
    }
  }

  /**
   * Validate the API key with a lightweight GET /models request.
   * @returns false on any HTTP or network failure.
   */
  async validateApiKey(): Promise<boolean> {
    try {
      const response = await fetch(`${this.baseUrl}/models`, {
        method: 'GET',
        headers: {
          Authorization: `Bearer ${this.apiKey}`,
        },
      })
      return response.ok
    } catch {
      return false
    }
  }
}

View File

@@ -0,0 +1,109 @@
/**
 * LLM service type definitions shared by all provider implementations.
 */
/**
 * Identifiers of the supported LLM providers.
 */
export type LLMProvider = 'deepseek' | 'qwen'
/**
 * Configuration needed to construct a concrete LLM service.
 */
export interface LLMConfig {
provider: LLMProvider
apiKey: string
// Model id; each provider supplies a default when omitted.
model?: string
// Override for the provider's default API endpoint.
baseUrl?: string
maxTokens?: number
}
/**
 * One message in a chat exchange (OpenAI-style roles).
 */
export interface ChatMessage {
role: 'system' | 'user' | 'assistant'
content: string
}
/**
 * Per-request tuning options.
 */
export interface ChatOptions {
temperature?: number
maxTokens?: number
stream?: boolean
}
/**
 * Result of a non-streaming chat call.
 */
export interface ChatResponse {
content: string
// Why generation stopped; 'error' covers any unrecognized reason.
finishReason: 'stop' | 'length' | 'error'
// Token accounting, when the provider reports it.
usage?: {
promptTokens: number
completionTokens: number
totalTokens: number
}
}
/**
 * One chunk of a streaming response.
 */
export interface ChatStreamChunk {
content: string
// Set on the terminal chunk of the stream.
isFinished: boolean
finishReason?: 'stop' | 'length' | 'error'
}
/**
 * Contract every LLM provider implementation must satisfy.
 */
export interface ILLMService {
/** Provider identifier. */
getProvider(): LLMProvider
/** Ids of the models this provider exposes. */
getModels(): string[]
/** Model used when none is configured. */
getDefaultModel(): string
/** Non-streaming chat completion. */
chat(messages: ChatMessage[], options?: ChatOptions): Promise<ChatResponse>
/** Streaming chat completion. */
chatStream(messages: ChatMessage[], options?: ChatOptions): AsyncGenerator<ChatStreamChunk>
/** Probe whether the configured API key is accepted by the provider. */
validateApiKey(): Promise<boolean>
}
/**
 * Static, UI-facing description of a provider.
 */
export interface ProviderInfo {
id: LLMProvider
name: string
description: string
defaultBaseUrl: string
models: Array<{
id: string
name: string
description?: string
}>
}

162
electron/main/ai/logger.ts Normal file
View File

@@ -0,0 +1,162 @@
/**
* AI 日志模块
* 将 AI 相关操作日志写入本地文件
*/
import * as fs from 'fs'
import * as path from 'path'
import { app } from 'electron'
// Log directory / file / stream state (lazily resolved, cached per process).
let LOG_DIR: string | null = null
let LOG_FILE: string | null = null
let logStream: fs.WriteStream | null = null

/**
 * Resolve (and cache) the directory that holds AI log files. Falls back
 * to ./logs under the working directory when the Electron app API is
 * unavailable.
 */
function getLogDir(): string {
  if (!LOG_DIR) {
    try {
      LOG_DIR = path.join(app.getPath('documents'), 'ChatLab', 'logs')
    } catch {
      LOG_DIR = path.join(process.cwd(), 'logs')
    }
  }
  return LOG_DIR
}
/**
 * Create the log directory when it does not exist yet.
 */
function ensureLogDir(): void {
  const dir = getLogDir()
  if (fs.existsSync(dir)) return
  fs.mkdirSync(dir, { recursive: true })
}
/**
* 获取当前日志文件路径
*/
function getLogFilePath(): string {
if (LOG_FILE) return LOG_FILE
ensureLogDir()
const date = new Date().toISOString().split('T')[0]
LOG_FILE = path.join(getLogDir(), `ai_${date}.log`)
return LOG_FILE
}
/**
* 获取日志写入流
*/
function getLogStream(): fs.WriteStream {
if (logStream) return logStream
const filePath = getLogFilePath()
logStream = fs.createWriteStream(filePath, { flags: 'a', encoding: 'utf-8' })
return logStream
}
/**
 * Timestamp for log lines: ISO-8601 in UTC.
 */
function formatTimestamp(): string {
  const now = new Date()
  return now.toISOString()
}
/**
 * Severity levels for log entries.
 */
type LogLevel = 'DEBUG' | 'INFO' | 'WARN' | 'ERROR'

/**
 * Format one log entry and write it to the log file and the console.
 * @param level Severity.
 * @param category Subsystem tag (e.g. 'LLM', 'IPC').
 * @param message Human-readable message.
 * @param data Optional payload, pretty-printed as JSON when serializable.
 */
function writeLog(level: LogLevel, category: string, message: string, data?: any): void {
  const parts = [`[${formatTimestamp()}] [${level}] [${category}] ${message}`]
  if (data !== undefined) {
    try {
      parts.push(typeof data === 'string' ? data : JSON.stringify(data, null, 2))
    } catch {
      parts.push(`[无法序列化的数据]`)
    }
  }
  const logLine = parts.join('\n') + '\n'
  // File output is best-effort; a broken stream must not crash the caller.
  try {
    getLogStream().write(logLine)
  } catch (error) {
    console.error('[AILogger] 写入日志失败:', error)
  }
  // Mirror to the console for live debugging.
  console.log(`[AI] ${logLine.trim()}`)
}
/**
 * Logger facade for AI subsystems. All level methods delegate to writeLog.
 */
export const aiLogger = {
  debug: (category: string, message: string, data?: any) => writeLog('DEBUG', category, message, data),
  info: (category: string, message: string, data?: any) => writeLog('INFO', category, message, data),
  warn: (category: string, message: string, data?: any) => writeLog('WARN', category, message, data),
  error: (category: string, message: string, data?: any) => writeLog('ERROR', category, message, data),
  /** Flush and release the underlying file stream. */
  close: () => {
    if (logStream) {
      logStream.end()
      logStream = null
    }
  },
  /** Path of the file log lines are currently written to. */
  getLogPath: (): string => getLogFilePath(),
}
// Convenience wrappers bound to fixed categories.
export const logAI = (message: string, data?: any): void => aiLogger.info('AI', message, data)
export const logLLM = (message: string, data?: any): void => aiLogger.info('LLM', message, data)
export const logSearch = (message: string, data?: any): void => aiLogger.info('Search', message, data)
export const logRAG = (message: string, data?: any): void => aiLogger.info('RAG', message, data)

View File

@@ -13,6 +13,12 @@ import { detectFormat, type ParseProgress } from './parser'
// 导入合并模块
import * as merger from './merger'
import { deleteTempDatabase, cleanupAllTempDatabases } from './merger/tempCache'
// 导入 AI 对话管理模块
import * as aiConversations from './ai/conversations'
// 导入 LLM 服务模块
import * as llm from './ai/llm'
// 导入 AI 日志模块
import { aiLogger } from './ai/logger'
import type { MergeParams } from '../../src/types/chat'
console.log('[IpcMain] Database, Worker and Parser modules imported')
@@ -718,6 +724,288 @@ const mainIpcMain = (win: BrowserWindow) => {
throw error
}
})
// ==================== AI features ====================
/**
 * Keyword search across a session's chat messages. Resolves with
 * { messages: [], total: 0 } on failure instead of rejecting, so the
 * renderer does not need extra error handling.
 */
ipcMain.handle(
'ai:searchMessages',
async (_, sessionId: string, keywords: string[], filter?: { startTs?: number; endTs?: number }, limit?: number, offset?: number) => {
aiLogger.info('IPC', '收到搜索消息请求', {
sessionId,
keywords,
filter,
limit,
offset,
})
try {
const result = await worker.searchMessages(sessionId, keywords, filter, limit, offset)
aiLogger.info('IPC', '搜索消息完成', {
total: result.total,
returned: result.messages.length,
})
return result
} catch (error) {
aiLogger.error('IPC', '搜索消息失败', { error: String(error) })
console.error('搜索消息失败:', error)
return { messages: [], total: 0 }
}
}
)
/**
 * Fetch the messages surrounding a specific message; [] on failure.
 */
ipcMain.handle('ai:getMessageContext', async (_, sessionId: string, messageId: number, contextSize?: number) => {
try {
return await worker.getMessageContext(sessionId, messageId, contextSize)
} catch (error) {
console.error('获取消息上下文失败:', error)
return []
}
})
/**
 * Create a new AI conversation for a session. Rethrows on failure so the
 * renderer can surface the error.
 */
ipcMain.handle('ai:createConversation', async (_, sessionId: string, title?: string) => {
try {
return aiConversations.createConversation(sessionId, title)
} catch (error) {
console.error('创建 AI 对话失败:', error)
throw error
}
})
/**
 * List all AI conversations of a session; [] on failure.
 */
ipcMain.handle('ai:getConversations', async (_, sessionId: string) => {
try {
return aiConversations.getConversations(sessionId)
} catch (error) {
console.error('获取 AI 对话列表失败:', error)
return []
}
})
/**
 * Fetch a single AI conversation; null on failure or when missing.
 */
ipcMain.handle('ai:getConversation', async (_, conversationId: string) => {
try {
return aiConversations.getConversation(conversationId)
} catch (error) {
console.error('获取 AI 对话失败:', error)
return null
}
})
/**
 * Rename an AI conversation; false on failure.
 */
ipcMain.handle('ai:updateConversationTitle', async (_, conversationId: string, title: string) => {
try {
return aiConversations.updateConversationTitle(conversationId, title)
} catch (error) {
console.error('更新 AI 对话标题失败:', error)
return false
}
})
/**
 * Delete an AI conversation (its messages are removed as well); false on failure.
 */
ipcMain.handle('ai:deleteConversation', async (_, conversationId: string) => {
try {
return aiConversations.deleteConversation(conversationId)
} catch (error) {
console.error('删除 AI 对话失败:', error)
return false
}
})
/**
 * Append a message to an AI conversation. Rethrows on failure.
 */
ipcMain.handle(
'ai:addMessage',
async (_, conversationId: string, role: 'user' | 'assistant', content: string, dataKeywords?: string[], dataMessageCount?: number) => {
try {
return aiConversations.addMessage(conversationId, role, content, dataKeywords, dataMessageCount)
} catch (error) {
console.error('添加 AI 消息失败:', error)
throw error
}
}
)
/**
 * List all messages of an AI conversation; [] on failure.
 */
ipcMain.handle('ai:getMessages', async (_, conversationId: string) => {
try {
return aiConversations.getMessages(conversationId)
} catch (error) {
console.error('获取 AI 消息失败:', error)
return []
}
})
/**
 * Delete a single AI message; false on failure.
 */
ipcMain.handle('ai:deleteMessage', async (_, messageId: string) => {
try {
return aiConversations.deleteMessage(messageId)
} catch (error) {
console.error('删除 AI 消息失败:', error)
return false
}
})
// ==================== LLM service ====================
/**
 * List all supported LLM provider descriptors.
 */
ipcMain.handle('llm:getProviders', async () => {
return llm.PROVIDERS
})
/**
 * Return the current LLM config for display. The API key is never sent
 * to the renderer in full — only a masked preview plus an apiKeySet flag.
 */
ipcMain.handle('llm:getConfig', async () => {
  const config = llm.loadLLMConfig()
  if (!config) return null
  // BUG FIX: the old head+tail mask (slice(0,8)...slice(-4)) leaked the
  // entire key when it was 12 characters or shorter, because the two
  // slices overlapped. Short keys are now fully masked.
  const key = config.apiKey || ''
  const maskedKey = key.length > 12 ? `${key.slice(0, 8)}...${key.slice(-4)}` : key ? '***' : ''
  return {
    provider: config.provider,
    apiKey: maskedKey,
    apiKeySet: !!config.apiKey,
    model: config.model,
    maxTokens: config.maxTokens,
  }
})
/**
 * Persist the LLM configuration; resolves with { success, error? }.
 */
ipcMain.handle('llm:saveConfig', async (_, config: { provider: llm.LLMProvider; apiKey: string; model?: string; maxTokens?: number }) => {
try {
llm.saveLLMConfig(config)
return { success: true }
} catch (error) {
console.error('保存 LLM 配置失败:', error)
return { success: false, error: String(error) }
}
})
/**
 * Delete the persisted LLM configuration; false on failure.
 */
ipcMain.handle('llm:deleteConfig', async () => {
try {
llm.deleteLLMConfig()
return true
} catch (error) {
console.error('删除 LLM 配置失败:', error)
return false
}
})
/**
 * Validate an API key against the provider; false on any error.
 */
ipcMain.handle('llm:validateApiKey', async (_, provider: llm.LLMProvider, apiKey: string) => {
try {
return await llm.validateApiKey(provider, apiKey)
} catch (error) {
console.error('验证 API Key 失败:', error)
return false
}
})
/**
 * Whether an LLM configuration with an API key exists.
 */
ipcMain.handle('llm:hasConfig', async () => {
return llm.hasLLMConfig()
})
/**
 * Non-streaming LLM chat; resolves with { success, content } or
 * { success: false, error }.
 */
ipcMain.handle('llm:chat', async (_, messages: llm.ChatMessage[], options?: llm.ChatOptions) => {
aiLogger.info('IPC', '收到非流式 LLM 请求', {
messagesCount: messages.length,
firstMsgRole: messages[0]?.role,
firstMsgContentLen: messages[0]?.content?.length,
options,
})
try {
const response = await llm.chat(messages, options)
aiLogger.info('IPC', '非流式 LLM 请求成功', { responseLength: response.length })
return { success: true, content: response }
} catch (error) {
aiLogger.error('IPC', '非流式 LLM 请求失败', { error: String(error) })
console.error('LLM 聊天失败:', error)
return { success: false, error: String(error) }
}
})
/**
 * Streaming LLM chat. The handler resolves immediately; chunks are then
 * pushed to the renderer via 'llm:streamChunk' events keyed by requestId,
 * with failures forwarded as a terminal error chunk.
 */
ipcMain.handle('llm:chatStream', async (_, requestId: string, messages: llm.ChatMessage[], options?: llm.ChatOptions) => {
aiLogger.info('IPC', `收到流式聊天请求: ${requestId}`, {
messagesCount: messages.length,
options,
})
try {
const generator = llm.chatStream(messages, options)
aiLogger.info('IPC', `创建流式生成器: ${requestId}`)
// Consume the generator asynchronously (fire-and-forget) and forward
// each chunk to the renderer window.
;(async () => {
let chunkIndex = 0
try {
aiLogger.info('IPC', `开始迭代流式响应: ${requestId}`)
for await (const chunk of generator) {
chunkIndex++
aiLogger.debug('IPC', `发送 chunk #${chunkIndex}: ${requestId}`, {
contentLength: chunk.content?.length,
isFinished: chunk.isFinished,
finishReason: chunk.finishReason,
})
win.webContents.send('llm:streamChunk', { requestId, chunk })
}
aiLogger.info('IPC', `流式响应完成: ${requestId}`, { totalChunks: chunkIndex })
} catch (error) {
aiLogger.error('IPC', `流式响应出错: ${requestId}`, {
error: String(error),
chunkIndex,
})
// Surface the failure to the renderer as a terminal error chunk.
win.webContents.send('llm:streamChunk', {
requestId,
chunk: { content: '', isFinished: true, finishReason: 'error' },
error: String(error),
})
}
})()
return { success: true }
} catch (error) {
aiLogger.error('IPC', `创建流式请求失败: ${requestId}`, { error: String(error) })
console.error('LLM 流式聊天失败:', error)
return { success: false, error: String(error) }
}
})
}
export default mainIpcMain

View File

@@ -33,6 +33,8 @@ import {
getLaughAnalysis,
getMemeBattleAnalysis,
getCheckInAnalysis,
searchMessages,
getMessageContext,
} from './query'
import { parseFile, detectFormat } from '../parser'
import { streamImport, streamParseFileInfo } from './import'
@@ -109,6 +111,10 @@ const syncHandlers: Record<string, (payload: any) => any> = {
getLaughAnalysis: (p) => getLaughAnalysis(p.sessionId, p.filter, p.keywords),
getMemeBattleAnalysis: (p) => getMemeBattleAnalysis(p.sessionId, p.filter),
getCheckInAnalysis: (p) => getCheckInAnalysis(p.sessionId, p.filter),
// AI 查询
searchMessages: (p) => searchMessages(p.sessionId, p.keywords, p.filter, p.limit, p.offset),
getMessageContext: (p) => getMessageContext(p.sessionId, p.messageId, p.contextSize),
}
// 异步消息处理器(流式操作)

View File

@@ -36,4 +36,9 @@ export {
// 流式导入 API
streamImport,
streamParseFileInfo,
// AI 查询 API
searchMessages,
getMessageContext,
} from './workerManager'
export type { SearchMessageResult } from './workerManager'

View File

@@ -0,0 +1,128 @@
/**
* AI 查询模块
* 提供关键词搜索功能(在 Worker 线程中执行)
*/
import { openDatabase, buildTimeFilter, type TimeFilter } from '../core'
// ==================== Message search ====================
/**
 * One row returned by the search/context queries below.
 */
export interface SearchMessageResult {
id: number
senderName: string
senderPlatformId: string
content: string
// Unix timestamp (the message.ts column).
timestamp: number
// Raw message type code from the message table.
type: number
}
/**
 * Keyword search over a session's messages (keywords combined with OR).
 * System messages are excluded; results are newest-first and paginated.
 *
 * @param sessionId Session whose database is queried.
 * @param keywords Keywords matched with LIKE '%kw%'; an empty list yields
 *                 an empty result (previously it produced invalid SQL).
 * @param filter Optional time range filter.
 * @param limit Page size.
 * @param offset Pagination offset.
 * @returns Matching messages plus the total match count.
 */
export function searchMessages(
  sessionId: string,
  keywords: string[],
  filter?: TimeFilter,
  limit: number = 20,
  offset: number = 0
): { messages: SearchMessageResult[]; total: number } {
  const db = openDatabase(sessionId)
  if (!db) return { messages: [], total: 0 }
  // BUG FIX: an empty keyword list used to build "WHERE ()" — a SQL
  // syntax error that crashed the query. Treat it as "no matches".
  if (keywords.length === 0) return { messages: [], total: 0 }
  // Keyword conditions (OR logic).
  const keywordConditions = keywords.map(() => `msg.content LIKE ?`).join(' OR ')
  const keywordParams = keywords.map((k) => `%${k}%`)
  // Time filter; buildTimeFilter emits a leading WHERE which is folded
  // into the existing WHERE as an AND clause (assumes the clause starts
  // with WHERE — TODO confirm against buildTimeFilter).
  const { clause: timeClause, params: timeParams } = buildTimeFilter(filter)
  const timeCondition = timeClause ? timeClause.replace('WHERE', 'AND') : ''
  // Exclude system messages.
  const systemFilter = "AND m.name != '系统消息'"
  // Total match count for pagination.
  const countSql = `
    SELECT COUNT(*) as total
    FROM message msg
    JOIN member m ON msg.sender_id = m.id
    WHERE (${keywordConditions})
    ${timeCondition}
    ${systemFilter}
  `
  const totalRow = db.prepare(countSql).get(...keywordParams, ...timeParams) as { total: number }
  const total = totalRow?.total || 0
  // Current page of matches.
  const sql = `
    SELECT
      msg.id,
      m.name as senderName,
      m.platform_id as senderPlatformId,
      msg.content,
      msg.ts as timestamp,
      msg.type
    FROM message msg
    JOIN member m ON msg.sender_id = m.id
    WHERE (${keywordConditions})
    ${timeCondition}
    ${systemFilter}
    ORDER BY msg.ts DESC
    LIMIT ? OFFSET ?
  `
  const rows = db.prepare(sql).all(...keywordParams, ...timeParams, limit, offset) as SearchMessageResult[]
  return { messages: rows, total }
}
/**
 * Fetch the messages surrounding a target message (its context window).
 *
 * Returns up to `contextSize` messages before the target, the target itself,
 * and up to `contextSize` messages after it, in chronological order. System
 * messages are excluded. Returns [] when the database or the target message
 * cannot be found.
 *
 * The previous implementation selected the FIRST `2*contextSize+1` rows of a
 * ±contextSize-minute window ordered ascending, so in a busy chat the target
 * message could fall entirely outside the returned rows.
 *
 * @param sessionId   session ID selecting which chat database to open
 * @param messageId   ID of the message to center the context on
 * @param contextSize number of messages to include on each side (default 5)
 */
export function getMessageContext(
  sessionId: string,
  messageId: number,
  contextSize: number = 5
): SearchMessageResult[] {
  const db = openDatabase(sessionId)
  if (!db) return []
  // Locate the target message's timestamp; bail out if it does not exist.
  const targetMsg = db.prepare('SELECT ts FROM message WHERE id = ?').get(messageId) as { ts: number } | undefined
  if (!targetMsg) return []
  const baseSelect = `
    SELECT
      msg.id,
      m.name as senderName,
      m.platform_id as senderPlatformId,
      msg.content,
      msg.ts as timestamp,
      msg.type
    FROM message msg
    JOIN member m ON msg.sender_id = m.id
    WHERE m.name != '系统消息'
  `
  // Messages strictly before the target (ties on ts broken by id), fetched
  // closest-first and then reversed back into chronological order.
  const before = db
    .prepare(`${baseSelect} AND (msg.ts < ? OR (msg.ts = ? AND msg.id < ?)) ORDER BY msg.ts DESC, msg.id DESC LIMIT ?`)
    .all(targetMsg.ts, targetMsg.ts, messageId, contextSize) as SearchMessageResult[]
  // The target message itself (filtered out if it is a system message).
  const target = db.prepare(`${baseSelect} AND msg.id = ?`).all(messageId) as SearchMessageResult[]
  // Messages strictly after the target, already in chronological order.
  const after = db
    .prepare(`${baseSelect} AND (msg.ts > ? OR (msg.ts = ? AND msg.id > ?)) ORDER BY msg.ts ASC, msg.id ASC LIMIT ?`)
    .all(targetMsg.ts, targetMsg.ts, messageId, contextSize) as SearchMessageResult[]
  return [...before.reverse(), ...target, ...after]
}

View File

@@ -32,3 +32,6 @@ export {
getLaughAnalysis,
} from './advanced'
// AI 查询
export { searchMessages, getMessageContext } from './ai'

View File

@@ -350,3 +350,38 @@ export async function streamImport(
/** Returns the on-disk directory where chat databases are stored. */
export function getDbDirectory(): string {
  return getDbDir()
}
// ==================== AI query API ====================
/** One message row returned by the AI keyword search (worker result shape). */
export interface SearchMessageResult {
  id: number
  // Display name of the sender
  senderName: string
  // Platform-specific sender ID
  senderPlatformId: string
  content: string
  // Message timestamp (epoch units — TODO confirm seconds vs ms)
  timestamp: number
  // Numeric message type code
  type: number
}
/**
 * Keyword search over chat messages (executed in the worker thread).
 *
 * @param sessionId session ID of the chat database
 * @param keywords  keywords combined with OR logic
 * @param filter    optional time-range filter (was `any`; typed structurally
 *                  to match the preload declaration so misuse is caught)
 * @param limit     page size
 * @param offset    pagination offset
 */
export async function searchMessages(
  sessionId: string,
  keywords: string[],
  filter?: { startTs?: number; endTs?: number },
  limit?: number,
  offset?: number
): Promise<{ messages: SearchMessageResult[]; total: number }> {
  return sendToWorker('searchMessages', { sessionId, keywords, filter, limit, offset })
}
/**
 * Fetch the messages surrounding a given message (executed in the worker thread).
 *
 * @param sessionId   session ID of the chat database
 * @param messageId   ID of the message to center the context on
 * @param contextSize messages requested on each side (worker applies its default when omitted)
 */
export async function getMessageContext(
  sessionId: string,
  messageId: number,
  contextSize?: number
): Promise<SearchMessageResult[]> {
  return sendToWorker('getMessageContext', { sessionId, messageId, contextSize })
}

View File

@@ -81,13 +81,121 @@ interface MergeApi {
onParseProgress: (callback: (data: { filePath: string; progress: ImportProgress }) => void) => () => void
}
// AI-related types
/** One message row returned by the AI keyword search. */
interface SearchMessageResult {
  id: number
  senderName: string
  senderPlatformId: string
  content: string
  timestamp: number
  type: number
}
/** A persisted AI conversation attached to a chat session. */
interface AIConversation {
  id: string
  sessionId: string
  // User-visible title; null until one is set
  title: string | null
  createdAt: number
  updatedAt: number
}
/** One message within an AI conversation. */
interface AIMessage {
  id: string
  conversationId: string
  role: 'user' | 'assistant'
  content: string
  timestamp: number
  // Keywords used to gather chat data for this turn, if any
  dataKeywords?: string[]
  // Number of chat messages retrieved for this turn, if any
  dataMessageCount?: number
}
/** Renderer-facing API for AI search and conversation persistence (window.aiApi). */
interface AiApi {
  /** Keyword search (OR logic) over a session's chat messages, paginated. */
  searchMessages: (
    sessionId: string,
    keywords: string[],
    filter?: TimeFilter,
    limit?: number,
    offset?: number
  ) => Promise<{ messages: SearchMessageResult[]; total: number }>
  /** Messages surrounding a given message. */
  getMessageContext: (sessionId: string, messageId: number, contextSize?: number) => Promise<SearchMessageResult[]>
  /** Create a new AI conversation for a chat session. */
  createConversation: (sessionId: string, title?: string) => Promise<AIConversation>
  /** List all AI conversations of a chat session. */
  getConversations: (sessionId: string) => Promise<AIConversation[]>
  /** Fetch a single conversation, or null if it does not exist. */
  getConversation: (conversationId: string) => Promise<AIConversation | null>
  /** Rename a conversation; resolves to true on success. */
  updateConversationTitle: (conversationId: string, title: string) => Promise<boolean>
  /** Delete a conversation (its messages cascade); resolves to true on success. */
  deleteConversation: (conversationId: string) => Promise<boolean>
  /** Append a message to a conversation. */
  addMessage: (
    conversationId: string,
    role: 'user' | 'assistant',
    content: string,
    dataKeywords?: string[],
    dataMessageCount?: number
  ) => Promise<AIMessage>
  /** All messages of a conversation. */
  getMessages: (conversationId: string) => Promise<AIMessage[]>
  /** Delete a single AI message; resolves to true on success. */
  deleteMessage: (messageId: string) => Promise<boolean>
}
// LLM-related types
/** Metadata describing a supported LLM provider. */
interface LLMProviderInfo {
  id: string
  name: string
  description: string
  defaultBaseUrl: string
  models: Array<{ id: string; name: string; description?: string }>
}
/** Stored LLM configuration. */
interface LLMConfig {
  provider: string
  apiKey: string
  // NOTE(review): presumably true once a key has been saved (key may be masked) — confirm
  apiKeySet: boolean
  model?: string
  maxTokens?: number
}
/** One chat message sent to the LLM. */
interface LLMChatMessage {
  role: 'system' | 'user' | 'assistant'
  content: string
}
/** Per-request generation options. */
interface LLMChatOptions {
  temperature?: number
  maxTokens?: number
}
/** One chunk of a streaming chat response. */
interface LLMChatStreamChunk {
  content: string
  isFinished: boolean
  finishReason?: 'stop' | 'length' | 'error'
}
/** Renderer-facing API for LLM configuration and chat (window.llmApi). */
interface LlmApi {
  /** List supported LLM providers. */
  getProviders: () => Promise<LLMProviderInfo[]>
  /** Current saved configuration, or null when none exists. */
  getConfig: () => Promise<LLMConfig | null>
  /** Persist a configuration. */
  saveConfig: (config: {
    provider: string
    apiKey: string
    model?: string
    maxTokens?: number
  }) => Promise<{ success: boolean; error?: string }>
  /** Remove the saved configuration. */
  deleteConfig: () => Promise<boolean>
  /** Validate an API key against a provider. */
  validateApiKey: (provider: string, apiKey: string) => Promise<boolean>
  /** Whether a configuration has been saved. */
  hasConfig: () => Promise<boolean>
  /** Non-streaming chat request. */
  chat: (messages: LLMChatMessage[], options?: LLMChatOptions) => Promise<{ success: boolean; content?: string; error?: string }>
  /** Streaming chat request; resolves only after the stream completes. */
  chatStream: (
    messages: LLMChatMessage[],
    options?: LLMChatOptions,
    onChunk?: (chunk: LLMChatStreamChunk) => void
  ) => Promise<{ success: boolean; error?: string }>
}
declare global {
  // Globals injected by the preload script via contextBridge
  interface Window {
    electron: ElectronAPI
    api: Api
    chatApi: ChatApi
    mergeApi: MergeApi
    // AI search + conversation persistence API
    aiApi: AiApi
    // LLM provider configuration and chat API
    llmApi: LlmApi
  }
}
// The bare `export { ChatApi, Api, MergeApi }` duplicated names in the list
// below (TS "cannot redeclare exported variable"); keep the single complete list.
export { ChatApi, Api, MergeApi, AiApi, LlmApi, SearchMessageResult, AIConversation, AIMessage, LLMProviderInfo, LLMConfig, LLMChatMessage, LLMChatOptions, LLMChatStreamChunk }

View File

@@ -42,7 +42,7 @@ const api = {
}
},
receive: (channel: string, func: (...args: unknown[]) => void) => {
const validChannels = ['show-message', 'chat:importProgress', 'merge:parseProgress']
const validChannels = ['show-message', 'chat:importProgress', 'merge:parseProgress', 'llm:streamChunk']
if (validChannels.includes(channel)) {
// Deliberately strip event as it includes `sender`
ipcRenderer.on(channel, (_event, ...args) => func(...args))
@@ -332,6 +332,265 @@ const mergeApi = {
},
}
// AI API — AI features (search + conversation persistence)
/** One message row returned by the AI keyword search. */
interface SearchMessageResult {
  id: number
  senderName: string
  senderPlatformId: string
  content: string
  timestamp: number
  type: number
}
/** A persisted AI conversation attached to a chat session. */
interface AIConversation {
  id: string
  sessionId: string
  title: string | null
  createdAt: number
  updatedAt: number
}
/** One message within an AI conversation. */
interface AIMessage {
  id: string
  conversationId: string
  role: 'user' | 'assistant'
  content: string
  timestamp: number
  dataKeywords?: string[]
  dataMessageCount?: number
}
const aiApi = {
  /** Keyword search (OR logic) over a session's chat messages, with paging. */
  searchMessages: (
    sessionId: string,
    keywords: string[],
    filter?: { startTs?: number; endTs?: number },
    limit?: number,
    offset?: number
  ): Promise<{ messages: SearchMessageResult[]; total: number }> =>
    ipcRenderer.invoke('ai:searchMessages', sessionId, keywords, filter, limit, offset),

  /** Messages immediately surrounding a given message. */
  getMessageContext: (sessionId: string, messageId: number, contextSize?: number): Promise<SearchMessageResult[]> =>
    ipcRenderer.invoke('ai:getMessageContext', sessionId, messageId, contextSize),

  /** Create a new AI conversation under a chat session. */
  createConversation: (sessionId: string, title?: string): Promise<AIConversation> =>
    ipcRenderer.invoke('ai:createConversation', sessionId, title),

  /** List every AI conversation of a chat session. */
  getConversations: (sessionId: string): Promise<AIConversation[]> =>
    ipcRenderer.invoke('ai:getConversations', sessionId),

  /** Look up a single AI conversation by ID (null when absent). */
  getConversation: (conversationId: string): Promise<AIConversation | null> =>
    ipcRenderer.invoke('ai:getConversation', conversationId),

  /** Rename an AI conversation. */
  updateConversationTitle: (conversationId: string, title: string): Promise<boolean> =>
    ipcRenderer.invoke('ai:updateConversationTitle', conversationId, title),

  /** Delete an AI conversation. */
  deleteConversation: (conversationId: string): Promise<boolean> =>
    ipcRenderer.invoke('ai:deleteConversation', conversationId),

  /** Append a message to an AI conversation. */
  addMessage: (
    conversationId: string,
    role: 'user' | 'assistant',
    content: string,
    dataKeywords?: string[],
    dataMessageCount?: number
  ): Promise<AIMessage> =>
    ipcRenderer.invoke('ai:addMessage', conversationId, role, content, dataKeywords, dataMessageCount),

  /** Fetch every message of an AI conversation. */
  getMessages: (conversationId: string): Promise<AIMessage[]> =>
    ipcRenderer.invoke('ai:getMessages', conversationId),

  /** Delete a single AI message. */
  deleteMessage: (messageId: string): Promise<boolean> =>
    ipcRenderer.invoke('ai:deleteMessage', messageId),
}
// LLM API — LLM service features
/** Metadata describing a supported LLM provider. */
interface LLMProvider {
  id: string
  name: string
  description: string
  defaultBaseUrl: string
  models: Array<{ id: string; name: string; description?: string }>
}
/** Stored LLM configuration. */
interface LLMConfig {
  provider: string
  apiKey: string
  // NOTE(review): presumably true once a key has been saved — confirm
  apiKeySet: boolean
  model?: string
  maxTokens?: number
}
/** One chat message sent to the LLM. */
interface ChatMessage {
  role: 'system' | 'user' | 'assistant'
  content: string
}
/** Per-request generation options. */
interface ChatOptions {
  temperature?: number
  maxTokens?: number
}
/** One chunk of a streaming chat response. */
interface ChatStreamChunk {
  content: string
  isFinished: boolean
  finishReason?: 'stop' | 'length' | 'error'
}
const llmApi = {
  /**
   * List all supported LLM providers.
   */
  getProviders: (): Promise<LLMProvider[]> => {
    return ipcRenderer.invoke('llm:getProviders')
  },
  /**
   * Get the current LLM configuration (null when none is saved).
   */
  getConfig: (): Promise<LLMConfig | null> => {
    return ipcRenderer.invoke('llm:getConfig')
  },
  /**
   * Save the LLM configuration.
   */
  saveConfig: (config: {
    provider: string
    apiKey: string
    model?: string
    maxTokens?: number
  }): Promise<{ success: boolean; error?: string }> => {
    return ipcRenderer.invoke('llm:saveConfig', config)
  },
  /**
   * Delete the stored LLM configuration.
   */
  deleteConfig: (): Promise<boolean> => {
    return ipcRenderer.invoke('llm:deleteConfig')
  },
  /**
   * Validate an API key against a provider.
   */
  validateApiKey: (provider: string, apiKey: string): Promise<boolean> => {
    return ipcRenderer.invoke('llm:validateApiKey', provider, apiKey)
  },
  /**
   * Check whether an LLM configuration has been saved.
   */
  hasConfig: (): Promise<boolean> => {
    return ipcRenderer.invoke('llm:hasConfig')
  },
  /**
   * Send a non-streaming LLM chat request.
   */
  chat: (messages: ChatMessage[], options?: ChatOptions): Promise<{ success: boolean; content?: string; error?: string }> => {
    return ipcRenderer.invoke('llm:chat', messages, options)
  },
  /**
   * Send a streaming LLM chat request.
   * Returns a Promise that resolves only after the stream has finished.
   */
  chatStream: (
    messages: ChatMessage[],
    options?: ChatOptions,
    onChunk?: (chunk: ChatStreamChunk) => void
  ): Promise<{ success: boolean; error?: string }> => {
    return new Promise((resolve) => {
      // Unique ID so concurrent streams can share the 'llm:streamChunk' channel
      const requestId = `llm_${Date.now()}_${Math.random().toString(36).slice(2, 8)}`
      console.log('[preload] chatStream 开始requestId:', requestId)
      const handler = (
        _event: Electron.IpcRendererEvent,
        data: { requestId: string; chunk: ChatStreamChunk; error?: string }
      ) => {
        // Ignore chunks that belong to other in-flight requests
        if (data.requestId === requestId) {
          if (data.error) {
            console.log('[preload] chatStream 收到错误:', data.error)
            // Surface the error to the caller as a terminal chunk
            if (onChunk) {
              onChunk({ content: '', isFinished: true, finishReason: 'error' })
            }
            ipcRenderer.removeListener('llm:streamChunk', handler)
            resolve({ success: false, error: data.error })
          } else {
            if (onChunk) {
              onChunk(data.chunk)
            }
            // Stream finished: detach the listener and resolve
            if (data.chunk.isFinished) {
              console.log('[preload] chatStream 完成requestId:', requestId)
              ipcRenderer.removeListener('llm:streamChunk', handler)
              resolve({ success: true })
            }
          }
        }
      }
      ipcRenderer.on('llm:streamChunk', handler)
      // Kick off the request in the main process
      ipcRenderer.invoke('llm:chatStream', requestId, messages, options).then((result) => {
        console.log('[preload] chatStream invoke 返回:', result)
        if (!result.success) {
          ipcRenderer.removeListener('llm:streamChunk', handler)
          resolve(result)
        }
        // On success, wait for the stream to finish — handler resolves then
      }).catch((error) => {
        console.error('[preload] chatStream invoke 错误:', error)
        ipcRenderer.removeListener('llm:streamChunk', handler)
        resolve({ success: false, error: String(error) })
      })
    })
  },
}
// 扩展 api添加 dialog 功能
const extendedApi = {
...api,
@@ -351,6 +610,8 @@ if (process.contextIsolated) {
contextBridge.exposeInMainWorld('api', extendedApi)
contextBridge.exposeInMainWorld('chatApi', chatApi)
contextBridge.exposeInMainWorld('mergeApi', mergeApi)
contextBridge.exposeInMainWorld('aiApi', aiApi)
contextBridge.exposeInMainWorld('llmApi', llmApi)
} catch (error) {
console.error(error)
}
@@ -363,4 +624,8 @@ if (process.contextIsolated) {
window.chatApi = chatApi
// @ts-ignore (define in dts)
window.mergeApi = mergeApi
// @ts-ignore (define in dts)
window.aiApi = aiApi
// @ts-ignore (define in dts)
window.llmApi = llmApi
}