feat: 优化了语音配置页面的效果;新增语音实际波形图显示;新增语音点击跳转进度

fix: 修复了一个可能导致语音解密错乱的问题
This commit is contained in:
cc
2026-01-18 00:01:07 +08:00
parent 0853e049c8
commit be4d9b510d
16 changed files with 567 additions and 291 deletions

View File

@@ -442,6 +442,9 @@ function registerIpcHandlers() {
ipcMain.handle('chat:getVoiceData', async (_, sessionId: string, msgId: string, createTime?: number, serverId?: string | number) => {
return chatService.getVoiceData(sessionId, msgId, createTime, serverId)
})
ipcMain.handle('chat:resolveVoiceCache', async (_, sessionId: string, msgId: string) => {
return chatService.resolveVoiceCache(sessionId, msgId)
})
ipcMain.handle('chat:getVoiceTranscript', async (event, sessionId: string, msgId: string) => {
return chatService.getVoiceTranscript(sessionId, msgId, (text) => {

View File

@@ -108,6 +108,7 @@ contextBridge.exposeInMainWorld('electronAPI', {
getImageData: (sessionId: string, msgId: string) => ipcRenderer.invoke('chat:getImageData', sessionId, msgId),
getVoiceData: (sessionId: string, msgId: string, createTime?: number, serverId?: string | number) =>
ipcRenderer.invoke('chat:getVoiceData', sessionId, msgId, createTime, serverId),
resolveVoiceCache: (sessionId: string, msgId: string) => ipcRenderer.invoke('chat:resolveVoiceCache', sessionId, msgId),
getVoiceTranscript: (sessionId: string, msgId: string) => ipcRenderer.invoke('chat:getVoiceTranscript', sessionId, msgId),
onVoiceTranscriptPartial: (callback: (payload: { msgId: string; text: string }) => void) => {
const listener = (_: any, payload: { msgId: string; text: string }) => callback(payload)

View File

@@ -2202,7 +2202,7 @@ class ChatService {
// 3. 调用 C++ 接口获取语音 (Hex)
const voiceRes = await wcdbService.getVoiceData(sessionId, msgCreateTime, candidates, msgSvrId)
const voiceRes = await wcdbService.getVoiceData(sessionId, msgCreateTime, candidates, localId, msgSvrId)
if (!voiceRes.success || !voiceRes.hex) {
return { success: false, error: voiceRes.error || '未找到语音数据' }
}
@@ -2245,6 +2245,33 @@ class ChatService {
}
}
/**
* 检查语音是否已有缓存
*/
async resolveVoiceCache(sessionId: string, msgId: string): Promise<{ success: boolean; hasCache: boolean; data?: string }> {
try {
const cacheKey = this.getVoiceCacheKey(sessionId, msgId)
// 1. 检查内存缓存
const inMemory = this.voiceWavCache.get(cacheKey)
if (inMemory) {
return { success: true, hasCache: true, data: inMemory.toString('base64') }
}
// 2. 检查文件缓存
const cachedFile = this.getVoiceCacheFilePath(cacheKey)
if (existsSync(cachedFile)) {
const wavData = readFileSync(cachedFile)
this.cacheVoiceWav(cacheKey, wavData) // 回甜内存
return { success: true, hasCache: true, data: wavData.toString('base64') }
}
return { success: true, hasCache: false }
} catch (e) {
return { success: false, hasCache: false }
}
}
async getVoiceData_Legacy(sessionId: string, msgId: string): Promise<{ success: boolean; data?: string; error?: string }> {
try {
const localId = parseInt(msgId, 10)

View File

@@ -14,7 +14,6 @@ type ModelInfo = {
files: {
model: string
tokens: string
vad: string
}
sizeBytes: number
sizeLabel: string
@@ -31,8 +30,7 @@ const SENSEVOICE_MODEL: ModelInfo = {
name: 'SenseVoiceSmall',
files: {
model: 'model.int8.onnx',
tokens: 'tokens.txt',
vad: 'silero_vad.onnx'
tokens: 'tokens.txt'
},
sizeBytes: 245_000_000,
sizeLabel: '245 MB'
@@ -40,8 +38,7 @@ const SENSEVOICE_MODEL: ModelInfo = {
const MODEL_DOWNLOAD_URLS = {
model: 'https://modelscope.cn/models/pengzhendong/sherpa-onnx-sense-voice-zh-en-ja-ko-yue/resolve/master/model.int8.onnx',
tokens: 'https://modelscope.cn/models/pengzhendong/sherpa-onnx-sense-voice-zh-en-ja-ko-yue/resolve/master/tokens.txt',
vad: 'https://www.modelscope.cn/models/manyeyes/silero-vad-onnx/resolve/master/silero_vad.onnx'
tokens: 'https://modelscope.cn/models/pengzhendong/sherpa-onnx-sense-voice-zh-en-ja-ko-yue/resolve/master/tokens.txt'
}
export class VoiceTranscribeService {
@@ -74,12 +71,9 @@ export class VoiceTranscribeService {
try {
const modelPath = this.resolveModelPath(SENSEVOICE_MODEL.files.model)
const tokensPath = this.resolveModelPath(SENSEVOICE_MODEL.files.tokens)
const vadPath = this.resolveModelPath((SENSEVOICE_MODEL.files as any).vad)
const modelExists = existsSync(modelPath)
const tokensExists = existsSync(tokensPath)
const vadExists = existsSync(vadPath)
const exists = modelExists && tokensExists && vadExists
const exists = modelExists && tokensExists
if (!exists) {
return { success: true, exists: false, modelPath, tokensPath }
@@ -87,8 +81,7 @@ export class VoiceTranscribeService {
const modelSize = statSync(modelPath).size
const tokensSize = statSync(tokensPath).size
const vadSize = statSync(vadPath).size
const totalSize = modelSize + tokensSize + vadSize
const totalSize = modelSize + tokensSize
return {
success: true,
@@ -121,7 +114,6 @@ export class VoiceTranscribeService {
const modelPath = this.resolveModelPath(SENSEVOICE_MODEL.files.model)
const tokensPath = this.resolveModelPath(SENSEVOICE_MODEL.files.tokens)
const vadPath = this.resolveModelPath((SENSEVOICE_MODEL.files as any).vad)
// 初始进度
onProgress?.({
@@ -166,35 +158,16 @@ export class VoiceTranscribeService {
}
)
// 下载 vad 文件 (30%)
console.info('[VoiceTranscribe] 开始下载 VAD 文件...')
await this.downloadToFile(
(MODEL_DOWNLOAD_URLS as any).vad,
vadPath,
'vad',
(downloaded, total) => {
const modelSize = existsSync(modelPath) ? statSync(modelPath).size : 0
const tokensSize = existsSync(tokensPath) ? statSync(tokensPath).size : 0
const percent = total ? 70 + (downloaded / total) * 30 : 70
onProgress?.({
modelName: SENSEVOICE_MODEL.name,
downloadedBytes: modelSize + tokensSize + downloaded,
totalBytes: SENSEVOICE_MODEL.sizeBytes,
percent
})
}
)
console.info('[VoiceTranscribe] 模型下载完成')
console.info('[VoiceTranscribe] 所有文件下载完成')
return { success: true, modelPath, tokensPath }
} catch (error) {
const modelPath = this.resolveModelPath(SENSEVOICE_MODEL.files.model)
const tokensPath = this.resolveModelPath(SENSEVOICE_MODEL.files.tokens)
const vadPath = this.resolveModelPath((SENSEVOICE_MODEL.files as any).vad)
try {
if (existsSync(modelPath)) unlinkSync(modelPath)
if (existsSync(tokensPath)) unlinkSync(tokensPath)
if (existsSync(vadPath)) unlinkSync(vadPath)
} catch { }
return { success: false, error: String(error) }
} finally {
@@ -230,7 +203,7 @@ export class VoiceTranscribeService {
supportedLanguages = this.configService.get('transcribeLanguages')
// 如果配置中也没有或为空,使用默认值
if (!supportedLanguages || supportedLanguages.length === 0) {
supportedLanguages = ['zh']
supportedLanguages = ['zh', 'yue']
}
}
@@ -303,7 +276,7 @@ export class VoiceTranscribeService {
const request = protocol.get(url, options, (response) => {
console.info(`[VoiceTranscribe] ${fileName} 响应状态:`, response.statusCode)
// 处理重定向
if ([301, 302, 303, 307, 308].includes(response.statusCode || 0) && response.headers.location) {
if (remainingRedirects <= 0) {
@@ -324,11 +297,11 @@ export class VoiceTranscribeService {
const totalBytes = Number(response.headers['content-length'] || 0) || undefined
let downloadedBytes = 0
console.info(`[VoiceTranscribe] ${fileName} 文件大小:`, totalBytes ? `${(totalBytes / 1024 / 1024).toFixed(2)} MB` : '未知')
const writer = createWriteStream(targetPath)
// 设置数据接收超时60秒没有数据则超时
let lastDataTime = Date.now()
const dataTimeout = setInterval(() => {
@@ -392,7 +365,7 @@ export class VoiceTranscribeService {
// sherpa-onnx 的 recognizer 可能需要手动释放
this.recognizer = null
} catch (error) {
}
}
}
}
}

View File

@@ -347,9 +347,9 @@ export class WcdbCore {
this.wcdbGetDbStatus = null
}
// wcdb_status wcdb_get_voice_data(wcdb_handle handle, const char* session_id, int32_t create_time, const char* candidates_json, char** out_hex)
// wcdb_status wcdb_get_voice_data(wcdb_handle handle, const char* session_id, int32_t create_time, int32_t local_id, int64_t svr_id, const char* candidates_json, char** out_hex)
try {
this.wcdbGetVoiceData = this.lib.func('int32 wcdb_get_voice_data(int64 handle, const char* sessionId, int32 createTime, int64 svrId, const char* candidatesJson, _Out_ void** outHex)')
this.wcdbGetVoiceData = this.lib.func('int32 wcdb_get_voice_data(int64 handle, const char* sessionId, int32 createTime, int32 localId, int64 svrId, const char* candidatesJson, _Out_ void** outHex)')
} catch {
this.wcdbGetVoiceData = null
}
@@ -1321,12 +1321,12 @@ export class WcdbCore {
}
}
async getVoiceData(sessionId: string, createTime: number, candidates: string[], svrId: string | number = 0): Promise<{ success: boolean; hex?: string; error?: string }> {
async getVoiceData(sessionId: string, createTime: number, candidates: string[], localId: number = 0, svrId: string | number = 0): Promise<{ success: boolean; hex?: string; error?: string }> {
if (!this.ensureReady()) return { success: false, error: 'WCDB 未连接' }
if (!this.wcdbGetVoiceData) return { success: false, error: '当前 DLL 版本不支持获取语音数据' }
try {
const outPtr = [null as any]
const result = this.wcdbGetVoiceData(this.handle, sessionId, createTime, BigInt(svrId || 0), JSON.stringify(candidates), outPtr)
const result = this.wcdbGetVoiceData(this.handle, sessionId, createTime, localId, BigInt(svrId || 0), JSON.stringify(candidates), outPtr)
if (result !== 0 || !outPtr[0]) {
return { success: false, error: `获取语音数据失败: ${result}` }
}

View File

@@ -99,7 +99,7 @@ export class WcdbService {
setPaths(resourcesPath: string, userDataPath: string): void {
this.resourcesPath = resourcesPath
this.userDataPath = userDataPath
this.callWorker('setPaths', { resourcesPath, userDataPath }).catch(() => {})
this.callWorker('setPaths', { resourcesPath, userDataPath }).catch(() => { })
}
/**
@@ -107,7 +107,7 @@ export class WcdbService {
*/
setLogEnabled(enabled: boolean): void {
this.logEnabled = enabled
this.callWorker('setLogEnabled', { enabled }).catch(() => {})
this.callWorker('setLogEnabled', { enabled }).catch(() => { })
}
/**
@@ -346,8 +346,8 @@ export class WcdbService {
/**
* 获取语音数据
*/
async getVoiceData(sessionId: string, createTime: number, candidates: string[], svrId: string | number = 0): Promise<{ success: boolean; hex?: string; error?: string }> {
return this.callWorker('getVoiceData', { sessionId, createTime, candidates, svrId })
async getVoiceData(sessionId: string, createTime: number, candidates: string[], localId: number = 0, svrId: string | number = 0): Promise<{ success: boolean; hex?: string; error?: string }> {
return this.callWorker('getVoiceData', { sessionId, createTime, candidates, localId, svrId })
}
}

View File

@@ -1,5 +1,4 @@
import { parentPort, workerData } from 'worker_threads'
import * as fs from 'fs'
interface WorkerParams {
modelPath: string
@@ -18,16 +17,66 @@ const LANGUAGE_TAGS: Record<string, string> = {
'yue': '<|yue|>' // 粤语
}
// Technical tags (language, speech/no-speech, ITN, etc.) that must be removed from the final text
const TECH_TAGS = [
  '<|zh|>', '<|en|>', '<|ja|>', '<|ko|>', '<|yue|>',
  '<|nospeech|>', '<|speech|>',
  '<|itn|>', '<|wo_itn|>',
  '<|NORMAL|>'
]

// Emotion and sound-event tags mapped to intuitive emoji
const RICH_TAG_MAP: Record<string, string> = {
  '<|HAPPY|>': '😊',
  '<|SAD|>': '😔',
  '<|ANGRY|>': '😠',
  '<|NEUTRAL|>': '', // neutral emotion is not specially marked
  '<|FEARFUL|>': '😨',
  '<|DISGUSTED|>': '🤢',
  '<|SURPRISED|>': '😮',
  '<|BGM|>': '🎵',
  '<|Applause|>': '👏',
  '<|Laughter|>': '😂',
  '<|Cry|>': '😭',
  '<|Cough|>': ' (咳嗽) ',
  '<|Sneeze|>': ' (喷嚏) ',
}

/**
 * Escape a tag so it can be embedded literally in a RegExp pattern.
 * Covers all RegExp metacharacters, not just the '|', '<', '>' the
 * current tags happen to contain, so new tags cannot silently break matching.
 */
function escapeTagForRegExp(tag: string): string {
  return tag.replace(/[.*+?^${}()|[\]\\<>]/g, '\\$&')
}

/**
 * Rich-text post-processing: strip technical tags and convert recognized
 * emotion / sound-event tags into emoji.
 *
 * @param text Raw recognizer output, possibly containing `<|...|>` tags.
 * @returns Cleaned text with tags removed/converted and whitespace collapsed.
 */
function richTranscribePostProcess(text: string): string {
  if (!text) return ''
  let processed = text
  // 1. Convert emotion and event tags.
  //    Case-insensitive to tolerate casing differences between model versions.
  //    A function replacer is used so '$' in a replacement can never be
  //    misinterpreted as a RegExp substitution pattern.
  for (const [tag, replacement] of Object.entries(RICH_TAG_MAP)) {
    processed = processed.replace(new RegExp(escapeTagForRegExp(tag), 'gi'), () => replacement)
  }
  // 2. Remove all remaining technical tags
  for (const tag of TECH_TAGS) {
    processed = processed.replace(new RegExp(escapeTagForRegExp(tag), 'gi'), '')
  }
  // 3. Collapse runs of whitespace and trim
  return processed.replace(/\s+/g, ' ').trim()
}
// 检查识别结果是否在允许的语言列表中
function isLanguageAllowed(result: any, allowedLanguages: string[]): boolean {
if (!result || !result.lang) {
// 如果没有语言信息,默认允许
// 如果没有语言信息,默认允许(或从文本开头尝试提取)
return true
}
// 如果没有指定语言或语言列表为空,默认允许中文
// 如果没有指定语言或语言列表为空,默认允许中文和粤语
if (!allowedLanguages || allowedLanguages.length === 0) {
allowedLanguages = ['zh']
allowedLanguages = ['zh', 'yue']
}
const langTag = result.lang
@@ -55,7 +104,7 @@ async function run() {
let sherpa: any;
try {
sherpa = require('sherpa-onnx-node');
} catch (requireError) {
} catch (requireError) {
parentPort.postMessage({ type: 'error', error: 'Failed to load speech engine: ' + String(requireError) });
return;
}
@@ -65,11 +114,11 @@ async function run() {
// 确保有有效的语言列表,默认只允许中文
let allowedLanguages = languages || ['zh']
if (allowedLanguages.length === 0) {
allowedLanguages = ['zh']
allowedLanguages = ['zh']
}
console.log('[TranscribeWorker] 使用的语言白名单:', allowedLanguages)
// 1. 初始化识别器 (SenseVoiceSmall)
const recognizerConfig = {
modelConfig: {
@@ -83,122 +132,31 @@ async function run() {
}
}
const recognizer = new sherpa.OfflineRecognizer(recognizerConfig)
// 2. 初始化 VAD (用于流式输出效果)
const vadPath = modelPath.replace('model.int8.onnx', 'silero_vad.onnx');
const vadConfig = {
sileroVad: {
model: vadPath,
threshold: 0.5,
minSilenceDuration: 0.5,
minSpeechDuration: 0.25,
windowSize: 512
},
sampleRate: sampleRate,
debug: 0,
numThreads: 1
}
// 检查 VAD 模型是否存在,如果不存在则退回到全量识别
if (!fs.existsSync(vadPath)) {
const pcmData = wavData.slice(44)
const samples = new Float32Array(pcmData.length / 2)
for (let i = 0; i < samples.length; i++) {
samples[i] = pcmData.readInt16LE(i * 2) / 32768.0
}
const stream = recognizer.createStream()
stream.acceptWaveform({ sampleRate, samples })
recognizer.decode(stream)
const result = recognizer.getResult(stream)
console.log('[TranscribeWorker] 非VAD模式 - 识别结果对象:', JSON.stringify(result, null, 2))
// 检查语言是否在白名单中
if (isLanguageAllowed(result, allowedLanguages)) {
console.log('[TranscribeWorker] 非VAD模式 - 保留文本:', result.text)
parentPort.postMessage({ type: 'final', text: result.text })
} else {
console.log('[TranscribeWorker] 非VAD模式 - 语言不匹配,返回空文本')
parentPort.postMessage({ type: 'final', text: '' })
}
return
}
const vad = new sherpa.Vad(vadConfig, 60) // 60s max
// 3. 处理音频数据
// 2. 处理音频数据 (全量识别)
const pcmData = wavData.slice(44)
const samples = new Float32Array(pcmData.length / 2)
for (let i = 0; i < samples.length; i++) {
samples[i] = pcmData.readInt16LE(i * 2) / 32768.0
}
// 模拟流式输入:按小块喂给 VAD
const chunkSize = 1600 // 100ms for 16kHz
let offset = 0
let accumulatedText = ''
const stream = recognizer.createStream()
stream.acceptWaveform({ sampleRate, samples })
recognizer.decode(stream)
const result = recognizer.getResult(stream)
let segmentCount = 0;
console.log('[TranscribeWorker] 识别完成 - 结果对象:', JSON.stringify(result, null, 2))
while (offset < samples.length) {
const end = Math.min(offset + chunkSize, samples.length)
const chunk = samples.subarray(offset, end)
vad.acceptWaveform(chunk)
// 检查 ASR 结果
while (!vad.isEmpty()) {
const segment = vad.front(false)
const stream = recognizer.createStream()
stream.acceptWaveform({ sampleRate, samples: segment.samples })
recognizer.decode(stream)
const result = recognizer.getResult(stream)
console.log('[TranscribeWorker] 识别结果 - lang:', result.lang, 'text:', result.text)
// 检查语言是否在白名单中
if (result.text && isLanguageAllowed(result, allowedLanguages)) {
const text = result.text.trim()
if (text.length > 0) {
accumulatedText += (accumulatedText ? ' ' : '') + text
segmentCount++;
parentPort.postMessage({ type: 'partial', text: accumulatedText })
}
} else if (result.text) {
console.log('[TranscribeWorker] 跳过不匹配的语言段落')
}
vad.pop()
}
offset = end
// 让出主循环,保持响应
await new Promise(resolve => setImmediate(resolve))
// 3. 检查语言是否在白名单中
if (isLanguageAllowed(result, allowedLanguages)) {
const processedText = richTranscribePostProcess(result.text)
console.log('[TranscribeWorker] 语言匹配,返回文本:', processedText)
parentPort.postMessage({ type: 'final', text: processedText })
} else {
console.log('[TranscribeWorker] 语言不匹配,返回空文本')
parentPort.postMessage({ type: 'final', text: '' })
}
// Ensure any remaining buffer is processed
vad.flush();
while (!vad.isEmpty()) {
const segment = vad.front(false);
const stream = recognizer.createStream()
stream.acceptWaveform({ sampleRate, samples: segment.samples })
recognizer.decode(stream)
const result = recognizer.getResult(stream)
console.log('[TranscribeWorker] flush阶段 - lang:', result.lang, 'text:', result.text)
// 检查语言是否在白名单中
if (result.text && isLanguageAllowed(result, allowedLanguages)) {
const text = result.text.trim()
if (text) {
accumulatedText += (accumulatedText ? ' ' : '') + text
parentPort.postMessage({ type: 'partial', text: accumulatedText })
}
}
vad.pop();
}
parentPort.postMessage({ type: 'final', text: accumulatedText })
} catch (error) {
parentPort.postMessage({ type: 'error', error: String(error) })
}

View File

@@ -111,7 +111,7 @@ if (parentPort) {
result = await core.getMessageById(payload.sessionId, payload.localId)
break
case 'getVoiceData':
result = await core.getVoiceData(payload.sessionId, payload.createTime, payload.candidates, payload.svrId)
result = await core.getVoiceData(payload.sessionId, payload.createTime, payload.candidates, payload.localId, payload.svrId)
if (!result.success) {
console.error('[wcdbWorker] getVoiceData failed:', result.error)
}

Binary file not shown.

View File

@@ -44,12 +44,22 @@ export const AnimatedStreamingText = memo(({ text, className, loading }: Animate
))}
<style>{`
.fade-in-text {
animation: fadeIn 0.5s ease-out forwards;
animation: premiumFadeIn 0.8s cubic-bezier(0.16, 1, 0.3, 1) forwards;
opacity: 0;
display: inline-block;
filter: blur(4px);
}
@keyframes fadeIn {
from { opacity: 0; transform: translateY(2px); }
to { opacity: 1; transform: translateY(0); }
@keyframes premiumFadeIn {
from {
opacity: 0;
transform: translateY(4px) scale(0.98);
filter: blur(4px);
}
to {
opacity: 1;
transform: translateY(0) scale(1);
filter: blur(0);
}
}
.dot-flashing {
animation: blink 1s infinite;

View File

@@ -102,7 +102,7 @@ export const VoiceTranscribeDialog: React.FC<VoiceTranscribeDialogProps> = ({
</div>
<div className="model-item">
<span className="label"></span>
<span className="value"></span>
<span className="value"></span>
</div>
</div>
</div>

View File

@@ -1108,6 +1108,14 @@
border-radius: 16px;
}
}
// 使发送的语音消息和转文字也使用接收者的样式 (浅色)
&.sent.voice {
.bubble-content {
background: var(--bg-secondary);
color: var(--text-primary);
}
}
}
.bubble-avatar {
@@ -1309,10 +1317,6 @@
gap: 6px;
}
.message-bubble.sent .voice-message {
background: rgba(255, 255, 255, 0.18);
}
.voice-play-btn {
width: 32px;
height: 32px;
@@ -1345,6 +1349,50 @@
}
}
.voice-waveform {
flex: 1;
display: flex;
align-items: center;
gap: 2px;
height: 24px;
min-width: 120px;
}
.waveform-bar {
flex: 1;
width: 2px;
background: rgba(0, 0, 0, 0.1);
border-radius: 1px;
transition: transform 0.2s ease, background 0.2s ease;
&.played {
background: var(--primary);
}
}
.message-bubble.sent.voice .waveform-bar {
background: rgba(0, 0, 0, 0.1); // 基色改为透明黑
&.played {
background: var(--primary);
}
}
.voice-wave-placeholder {
display: flex;
align-items: flex-end;
gap: 3px;
height: 18px;
span {
width: 3px;
height: 8px;
border-radius: 2px;
background: var(--text-tertiary);
opacity: 0.6;
}
}
.voice-message.playing .voice-wave span {
animation: voicePulse 0.9s ease-in-out infinite;
}
@@ -1403,23 +1451,13 @@
border-radius: 14px;
font-size: 13px;
line-height: 1.5;
background: var(--bg-secondary);
background: var(--card-bg);
color: var(--text-primary);
border: 1px solid var(--border-color);
word-break: break-word;
white-space: pre-wrap;
}
.voice-transcript.sent {
background: rgba(255, 255, 255, 0.9);
color: #333333;
border-color: transparent;
}
.voice-transcript.received {
background: var(--card-bg);
}
.voice-transcript.error {
color: #d9480f;
cursor: pointer;
@@ -1882,6 +1920,7 @@
transform: translateX(0);
}
}
/* 语音转文字按钮样式 */
.voice-transcribe-btn {
width: 28px;
@@ -1909,4 +1948,4 @@
width: 14px;
height: 14px;
}
}
}

View File

@@ -1366,6 +1366,10 @@ function MessageBubble({ message, session, showTime, myAvatarUrl, isGroupChat, o
const voiceTranscriptRequestedRef = useRef(false)
const [showImagePreview, setShowImagePreview] = useState(false)
const [autoTranscribeVoice, setAutoTranscribeVoice] = useState(true)
const [voiceCurrentTime, setVoiceCurrentTime] = useState(0)
const [voiceDuration, setVoiceDuration] = useState(0)
const [voiceWaveform, setVoiceWaveform] = useState<number[]>([])
const voiceAutoDecryptTriggered = useRef(false)
// 加载自动转文字配置
useEffect(() => {
@@ -1658,18 +1662,92 @@ function MessageBubble({ message, session, showTime, myAvatarUrl, isGroupChat, o
if (!audio) return
const handlePlay = () => setIsVoicePlaying(true)
const handlePause = () => setIsVoicePlaying(false)
const handleEnded = () => setIsVoicePlaying(false)
const handleEnded = () => {
setIsVoicePlaying(false)
setVoiceCurrentTime(0)
}
const handleTimeUpdate = () => {
setVoiceCurrentTime(audio.currentTime)
}
const handleLoadedMetadata = () => {
setVoiceDuration(audio.duration)
}
audio.addEventListener('play', handlePlay)
audio.addEventListener('pause', handlePause)
audio.addEventListener('ended', handleEnded)
audio.addEventListener('timeupdate', handleTimeUpdate)
audio.addEventListener('loadedmetadata', handleLoadedMetadata)
return () => {
audio.pause()
audio.removeEventListener('play', handlePlay)
audio.removeEventListener('pause', handlePause)
audio.removeEventListener('ended', handleEnded)
audio.removeEventListener('timeupdate', handleTimeUpdate)
audio.removeEventListener('loadedmetadata', handleLoadedMetadata)
}
}, [isVoice])
// Generate waveform bar data from the decrypted WAV data URL for the voice bubble
useEffect(() => {
  if (!voiceDataUrl) {
    setVoiceWaveform([])
    return
  }
  const generateWaveform = async () => {
    const SAMPLES = 35 // number of waveform bars
    let audioCtx: AudioContext | null = null
    try {
      // Extract the base64 payload from "data:audio/wav;base64,..."
      const base64 = voiceDataUrl.split(',')[1]
      const binaryString = window.atob(base64)
      const bytes = new Uint8Array(binaryString.length)
      for (let i = 0; i < binaryString.length; i++) {
        bytes[i] = binaryString.charCodeAt(i)
      }
      audioCtx = new (window.AudioContext || (window as any).webkitAudioContext)()
      const audioBuffer = await audioCtx.decodeAudioData(bytes.buffer)
      const rawData = audioBuffer.getChannelData(0) // mono channel
      // Guard: very short clips could make blockSize 0 (division by zero → NaN bars)
      const blockSize = Math.max(1, Math.floor(rawData.length / SAMPLES))
      const averaged: number[] = []
      for (let i = 0; i < SAMPLES; i++) {
        const blockStart = blockSize * i
        let sum = 0
        for (let j = 0; j < blockSize; j++) {
          // out-of-range reads (short clip) contribute silence
          sum += Math.abs(rawData[blockStart + j] ?? 0)
        }
        averaged.push(sum / blockSize)
      }
      // Normalize to [0, 1]; guard against an all-silence clip
      // (the previous Math.pow(max, -1) produced Infinity → NaN bars)
      const peak = Math.max(...averaged)
      const normalized = peak > 0 ? averaged.map(n => n / peak) : averaged.map(() => 0.2)
      setVoiceWaveform(normalized)
    } catch (e) {
      console.error('Failed to generate waveform:', e)
      // Fallback: random but plausible-looking waveform
      setVoiceWaveform(Array.from({ length: 35 }, () => 0.2 + Math.random() * 0.8))
    } finally {
      // Release the AudioContext even when decodeAudioData throws
      void audioCtx?.close()
    }
  }
  void generateWaveform()
}, [voiceDataUrl])
// When a voice message is loaded, probe the main process for an existing decrypted cache
useEffect(() => {
  if (!isVoice || voiceDataUrl) return
  window.electronAPI.chat.resolveVoiceCache(session.username, String(message.localId))
    .then(result => {
      if (result.success && result.hasCache && result.data) {
        const url = `data:audio/wav;base64,${result.data}`
        voiceDataUrlCache.set(voiceCacheKey, url)
        setVoiceDataUrl(url)
      }
    })
    .catch(() => {
      // Best-effort probe: an IPC failure must not become an unhandled rejection
    })
}, [isVoice, message.localId, session.username, voiceCacheKey, voiceDataUrl])
// 监听流式转写结果
useEffect(() => {
if (!isVoice) return
@@ -1734,7 +1812,7 @@ function MessageBubble({ message, session, showTime, myAvatarUrl, isGroupChat, o
// 监听模型下载完成事件
useEffect(() => {
if (!isVoice) return
const handleModelDownloaded = (event: CustomEvent) => {
if (event.detail?.messageId === String(message.localId)) {
// 重置状态,允许重新尝试转写
@@ -1744,7 +1822,7 @@ function MessageBubble({ message, session, showTime, myAvatarUrl, isGroupChat, o
void requestVoiceTranscript()
}
}
window.addEventListener('model-downloaded', handleModelDownloaded as EventListener)
return () => {
window.removeEventListener('model-downloaded', handleModelDownloaded as EventListener)
@@ -1932,6 +2010,17 @@ function MessageBubble({ message, session, showTime, myAvatarUrl, isGroupChat, o
}
}
// Seek the voice playback to the clicked position on the waveform
const handleSeek = (e: React.MouseEvent<HTMLDivElement>) => {
  if (!voiceDataUrl || !voiceAudioRef.current) return
  e.stopPropagation()
  const rect = e.currentTarget.getBoundingClientRect()
  // Guard: zero-width element would make the ratio Infinity/NaN
  if (rect.width <= 0) return
  // Guard: metadata may not be loaded yet (duration 0 or NaN) — seeking is meaningless
  if (!Number.isFinite(voiceDuration) || voiceDuration <= 0) return
  // Clamp to [0, 1] so edge clicks cannot seek out of range
  const percentage = Math.min(1, Math.max(0, (e.clientX - rect.left) / rect.width))
  const newTime = percentage * voiceDuration
  voiceAudioRef.current.currentTime = newTime
  setVoiceCurrentTime(newTime)
}
const showDecryptHint = !voiceDataUrl && !voiceLoading && !isVoicePlaying
const showTranscript = Boolean(voiceDataUrl) && (voiceTranscriptLoading || voiceTranscriptError || voiceTranscript !== undefined)
const transcriptText = (voiceTranscript || '').trim()
@@ -1960,12 +2049,30 @@ function MessageBubble({ message, session, showTime, myAvatarUrl, isGroupChat, o
>
{isVoicePlaying ? <Pause size={16} /> : <Play size={16} />}
</button>
<div className="voice-wave">
<span />
<span />
<span />
<span />
<span />
<div className="voice-wave" onClick={handleSeek}>
{voiceDataUrl && voiceWaveform.length > 0 ? (
<div className="voice-waveform">
{voiceWaveform.map((amplitude, i) => {
const progress = (voiceCurrentTime / (voiceDuration || 1))
const isPlayed = (i / voiceWaveform.length) < progress
return (
<div
key={i}
className={`waveform-bar ${isPlayed ? 'played' : ''}`}
style={{ height: `${Math.max(20, amplitude * 100)}%` }}
/>
)
})}
</div>
) : (
<div className="voice-wave-placeholder">
<span />
<span />
<span />
<span />
<span />
</div>
)}
</div>
<div className="voice-info">
<span className="voice-label"></span>

View File

@@ -13,7 +13,7 @@
justify-content: space-between;
margin-bottom: 20px;
flex-shrink: 0;
h1 {
font-size: 24px;
font-weight: 600;
@@ -51,12 +51,12 @@
transition: all 0.2s;
background: transparent;
color: var(--text-secondary);
&:hover {
color: var(--text-primary);
background: var(--bg-secondary);
}
&.active {
background: var(--card-bg);
color: var(--primary);
@@ -68,15 +68,15 @@
flex: 1;
overflow-y: auto;
padding-right: 8px;
&::-webkit-scrollbar {
width: 6px;
}
&::-webkit-scrollbar-track {
background: transparent;
}
&::-webkit-scrollbar-thumb {
background: var(--border-color);
border-radius: 3px;
@@ -87,7 +87,7 @@
background: var(--bg-secondary);
border-radius: 16px;
padding: 24px;
.section-desc {
font-size: 13px;
color: var(--text-tertiary);
@@ -110,7 +110,7 @@
border-radius: 10px;
margin-bottom: 20px;
color: var(--text-secondary);
p {
margin: 0;
font-size: 14px;
@@ -124,24 +124,24 @@
.form-group {
margin-bottom: 20px;
&:last-child {
margin-bottom: 0;
}
label {
display: block;
font-size: 14px;
font-weight: 500;
color: var(--text-primary);
margin-bottom: 2px;
.optional {
font-weight: 400;
color: var(--text-tertiary);
}
}
.form-hint {
display: block;
font-size: 12px;
@@ -171,7 +171,7 @@
margin: 0;
}
}
.key-status {
display: block;
font-size: 13px;
@@ -179,7 +179,7 @@
margin-bottom: 10px;
animation: pulse 1.5s ease-in-out infinite;
}
input {
width: 100%;
padding: 10px 16px;
@@ -189,16 +189,16 @@
background: var(--bg-primary);
color: var(--text-primary);
margin-bottom: 10px;
&:focus {
outline: none;
border-color: var(--primary);
}
&::placeholder {
color: var(--text-tertiary);
}
&:read-only {
cursor: pointer;
}
@@ -220,18 +220,18 @@
border-color: var(--primary);
}
}
.input-with-toggle {
position: relative;
display: flex;
align-items: center;
margin-bottom: 10px;
input {
margin-bottom: 0;
padding-right: 70px;
}
.toggle-visibility {
position: absolute;
right: 12px;
@@ -243,7 +243,7 @@
color: var(--text-secondary);
cursor: pointer;
transition: all 0.2s;
&:hover {
background: var(--border-color);
color: var(--text-primary);
@@ -253,6 +253,19 @@
}
.whisper-section {
background: color-mix(in srgb, var(--primary) 3%, transparent);
border: 1px solid var(--border-color);
border-radius: 16px;
padding: 20px;
margin-top: 24px;
label {
font-size: 15px;
font-weight: 600;
color: var(--text-primary);
margin-bottom: 4px;
}
.whisper-grid {
display: grid;
grid-template-columns: repeat(auto-fit, minmax(200px, 1fr));
@@ -273,70 +286,148 @@
.whisper-status-line {
display: flex;
align-items: center;
gap: 8px;
gap: 10px;
font-size: 12px;
color: var(--text-secondary);
margin: 4px 0 10px;
margin: 12px 0 16px;
padding: 10px 14px;
background: var(--bg-primary);
border-radius: 12px;
border: 1px solid var(--border-color);
.status {
padding: 2px 8px;
padding: 4px 10px;
border-radius: 999px;
font-size: 11px;
font-weight: 500;
background: var(--bg-tertiary);
color: var(--text-secondary);
font-weight: 600;
white-space: nowrap;
}
.status.ok {
background: rgba(16, 185, 129, 0.12);
color: #059669;
background: rgba(16, 185, 129, 0.1);
color: #10b981;
border: 1px solid rgba(16, 185, 129, 0.2);
}
.status.warn {
background: rgba(245, 158, 11, 0.12);
color: #d97706;
background: rgba(245, 158, 11, 0.1);
color: #f59e0b;
border: 1px solid rgba(245, 158, 11, 0.2);
}
.path {
flex: 1;
min-width: 0;
font-family: ui-monospace, SFMono-Regular, Menlo, Monaco, Consolas, monospace;
font-size: 11px;
color: var(--text-tertiary);
overflow: hidden;
text-overflow: ellipsis;
white-space: nowrap;
opacity: 0.8;
}
}
.whisper-progress {
display: flex;
align-items: center;
gap: 12px;
flex-direction: column;
gap: 8px;
width: 100%;
max-width: 320px;
margin-top: 10px;
.progress-bar-container {
display: flex;
align-items: center;
gap: 12px;
}
.progress-bar {
flex: 1;
height: 6px;
height: 8px;
background: var(--bg-tertiary);
border-radius: 999px;
overflow: hidden;
box-shadow: inset 0 1px 2px rgba(0, 0, 0, 0.1);
.progress-fill {
height: 100%;
background: var(--primary);
background: linear-gradient(90deg, var(--primary) 0%, var(--primary-hover) 100%);
border-radius: 999px;
transition: width 0.2s ease;
transition: width 0.3s cubic-bezier(0.4, 0, 0.2, 1);
position: relative;
&::after {
content: '';
position: absolute;
top: 0;
left: 0;
right: 0;
bottom: 0;
background: linear-gradient(90deg,
rgba(255, 255, 255, 0) 0%,
rgba(255, 255, 255, 0.2) 50%,
rgba(255, 255, 255, 0) 100%);
animation: progress-shimmer 2s infinite;
}
}
}
span {
font-size: 12px;
color: var(--text-secondary);
min-width: 36px;
text-align: right;
.progress-info {
display: flex;
justify-content: space-between;
align-items: center;
span {
font-size: 12px;
color: var(--text-secondary);
font-weight: 500;
&.percent {
color: var(--primary);
font-weight: 600;
}
}
}
}
.btn-download-model {
width: 100%;
height: 44px;
justify-content: center;
font-size: 15px;
font-weight: 600;
margin-top: 8px;
background: linear-gradient(135deg, var(--primary) 0%, var(--primary-hover) 100%);
box-shadow: 0 4px 12px color-mix(in srgb, var(--primary) 20%, transparent);
border: 1px solid rgba(255, 255, 255, 0.1);
&:hover:not(:disabled) {
transform: translateY(-1px);
box-shadow: 0 6px 16px color-mix(in srgb, var(--primary) 30%, transparent);
}
&:active:not(:disabled) {
transform: translateY(0);
}
svg {
transition: transform 0.2s;
}
&:hover svg {
transform: translateY(2px);
}
}
}
@keyframes progress-shimmer {
from {
transform: translateX(-100%);
}
to {
transform: translateX(100%);
}
}
.log-toggle-line {
@@ -355,8 +446,8 @@
.language-checkboxes {
display: flex;
flex-wrap: wrap;
gap: 16px;
margin-top: 8px;
gap: 10px;
margin-top: 10px;
}
.language-checkbox {
@@ -365,21 +456,56 @@
gap: 8px;
cursor: pointer;
user-select: none;
position: relative;
input[type="checkbox"] {
width: 18px;
height: 18px;
position: absolute;
opacity: 0;
cursor: pointer;
accent-color: var(--primary);
height: 0;
width: 0;
}
.checkbox-label {
.checkbox-custom {
display: flex;
align-items: center;
gap: 8px;
padding: 8px 16px;
background: var(--bg-primary);
border: 1.5px solid var(--border-color);
border-radius: 12px;
font-size: 14px;
font-weight: 500;
color: var(--text-secondary);
transition: all 0.2s cubic-bezier(0.4, 0, 0.2, 1);
svg {
opacity: 0;
transform: scale(0.5);
transition: all 0.2s cubic-bezier(0.4, 0, 0.2, 1);
}
}
&:hover .checkbox-custom {
border-color: var(--text-tertiary);
background: var(--bg-tertiary);
color: var(--text-primary);
}
&:hover .checkbox-label {
input:checked+.checkbox-custom {
background: color-mix(in srgb, var(--primary) 10%, transparent);
border-color: var(--primary);
color: var(--primary);
box-shadow: 0 4px 12px color-mix(in srgb, var(--primary) 10%, transparent);
svg {
opacity: 1;
transform: scale(1);
}
}
&:active .checkbox-custom {
transform: scale(0.96);
}
}
@@ -422,12 +548,12 @@
transition: all 0.2s ease;
}
.switch-input:checked + .switch-slider {
.switch-input:checked+.switch-slider {
background: var(--primary);
border-color: var(--primary);
}
.switch-input:checked + .switch-slider::before {
.switch-input:checked+.switch-slider::before {
transform: translateX(22px);
background: #ffffff;
}
@@ -456,7 +582,7 @@
font-weight: 500;
cursor: pointer;
transition: all 0.2s;
&:disabled {
opacity: 0.6;
cursor: not-allowed;
@@ -466,25 +592,40 @@
.btn-primary {
background: var(--primary);
color: white;
box-shadow: 0 2px 6px color-mix(in srgb, var(--primary) 15%, transparent);
&:hover:not(:disabled) {
background: var(--primary-hover);
box-shadow: 0 4px 12px color-mix(in srgb, var(--primary) 25%, transparent);
transform: translateY(-1px);
}
&:active:not(:disabled) {
transform: translateY(0);
}
}
.btn-secondary {
background: var(--bg-tertiary);
color: var(--text-primary);
border: 1px solid var(--border-color);
&:hover:not(:disabled) {
background: var(--border-color);
background: var(--bg-primary);
border-color: var(--text-tertiary);
box-shadow: 0 2px 8px rgba(0, 0, 0, 0.05);
transform: translateY(-1px);
}
&:active:not(:disabled) {
transform: translateY(0);
}
}
.btn-danger {
background: var(--danger);
color: white;
&:hover:not(:disabled) {
opacity: 0.9;
}
@@ -513,12 +654,12 @@
font-size: 14px;
z-index: 100;
animation: slideDown 0.3s ease;
&.success {
background: var(--primary);
color: white;
}
&.error {
background: var(--danger);
color: white;
@@ -530,6 +671,7 @@
opacity: 0;
transform: translateX(-50%) translateY(-10px);
}
to {
opacity: 1;
transform: translateX(-50%) translateY(0);
@@ -537,9 +679,12 @@
}
@keyframes pulse {
0%, 100% {
0%,
100% {
opacity: 1;
}
50% {
opacity: 0.6;
}
@@ -554,7 +699,7 @@
background: var(--bg-tertiary);
border-radius: 12px;
width: fit-content;
.mode-btn {
display: flex;
align-items: center;
@@ -568,11 +713,11 @@
transition: all 0.2s;
background: transparent;
color: var(--text-secondary);
&:hover {
color: var(--text-primary);
}
&.active {
background: var(--card-bg);
color: var(--primary);
@@ -595,26 +740,26 @@
cursor: pointer;
transition: all 0.2s;
background: var(--bg-primary);
&:hover {
border-color: var(--text-tertiary);
}
&.active {
border-color: var(--primary);
.theme-preview {
box-shadow: 0 2px 8px rgba(0, 0, 0, 0.1);
}
}
.theme-preview {
height: 60px;
border-radius: 8px;
margin-bottom: 8px;
position: relative;
overflow: hidden;
.theme-accent {
position: absolute;
bottom: 8px;
@@ -625,24 +770,24 @@
box-shadow: 0 2px 4px rgba(0, 0, 0, 0.2);
}
}
.theme-info {
display: flex;
flex-direction: column;
gap: 2px;
.theme-name {
font-size: 13px;
font-weight: 500;
color: var(--text-primary);
}
.theme-desc {
font-size: 11px;
color: var(--text-tertiary);
}
}
.theme-check {
position: absolute;
top: 8px;
@@ -802,8 +947,13 @@
}
@keyframes spin {
from { transform: rotate(0deg); }
to { transform: rotate(360deg); }
from {
transform: rotate(0deg);
}
to {
transform: rotate(360deg);
}
}
@@ -909,7 +1059,7 @@
position: relative;
display: flex;
align-items: center;
input {
flex: 1;
padding-right: 36px;
@@ -930,11 +1080,11 @@
align-items: center;
justify-content: center;
transition: all 0.2s;
&:hover {
color: var(--text-primary);
}
&.open {
transform: rotate(180deg);
}
@@ -962,21 +1112,21 @@
padding: 10px 12px;
cursor: pointer;
transition: background 0.15s;
&:hover {
background: var(--bg-tertiary);
}
&.active {
background: var(--primary-light);
color: var(--primary);
}
.wxid-value {
font-weight: 500;
font-size: 13px;
}
.wxid-time {
font-size: 11px;
color: var(--text-tertiary);
@@ -1007,14 +1157,14 @@
.wxid-dialog-header {
padding: 20px 24px;
border-bottom: 1px solid var(--border-primary);
h3 {
margin: 0 0 4px;
font-size: 18px;
font-weight: 600;
color: var(--text-primary);
}
p {
margin: 0;
font-size: 13px;
@@ -1036,25 +1186,25 @@
border-radius: 10px;
cursor: pointer;
transition: all 0.15s;
&:hover {
background: var(--bg-tertiary);
}
&.active {
background: var(--primary-light);
.wxid-id {
color: var(--primary);
}
}
.wxid-id {
font-size: 14px;
font-weight: 600;
color: var(--text-primary);
}
.wxid-date {
font-size: 12px;
color: var(--text-tertiary);
@@ -1066,4 +1216,4 @@
border-top: 1px solid var(--border-primary);
display: flex;
justify-content: flex-end;
}
}

View File

@@ -126,14 +126,14 @@ function SettingsPage() {
setLogEnabled(savedLogEnabled)
setAutoTranscribeVoice(savedAutoTranscribe)
setTranscribeLanguages(savedTranscribeLanguages)
// 如果语言列表为空,保存默认值
if (!savedTranscribeLanguages || savedTranscribeLanguages.length === 0) {
const defaultLanguages = ['zh']
setTranscribeLanguages(defaultLanguages)
await configService.setTranscribeLanguages(defaultLanguages)
}
if (savedWhisperModelDir) setWhisperModelDir(savedWhisperModelDir)
} catch (e) {
console.error('加载配置失败:', e)
@@ -776,6 +776,7 @@ function SettingsPage() {
<div className="language-checkboxes">
{[
{ code: 'zh', name: '中文' },
{ code: 'yue', name: '粤语' },
{ code: 'en', name: '英文' },
{ code: 'ja', name: '日文' },
{ code: 'ko', name: '韩文' }
@@ -787,32 +788,33 @@ function SettingsPage() {
onChange={async (e) => {
const checked = e.target.checked
let newLanguages: string[]
if (checked) {
// 添加语言
newLanguages = [...transcribeLanguages, lang.code]
} else {
// 移除语言,但至少保留一个
if (transcribeLanguages.length <= 1) {
showMessage('至少需要选择一种语言', false)
return
}
newLanguages = transcribeLanguages.filter(l => l !== lang.code)
}
setTranscribeLanguages(newLanguages)
await configService.setTranscribeLanguages(newLanguages)
showMessage(`${checked ? '添加' : '移除'}${lang.name}`, true)
}}
/>
<span className="checkbox-label">{lang.name}</span>
<div className="checkbox-custom">
<Check size={14} />
<span>{lang.name}</span>
</div>
</label>
))}
</div>
</div>
<div className="form-group whisper-section">
<label> (SenseVoiceSmall)</label>
<span className="form-hint"> Sherpa-onnx</span>
<span className="form-hint"> Sherpa-onnx/</span>
<span className="form-hint"></span>
<input
type="text"
@@ -833,14 +835,19 @@ function SettingsPage() {
</div>
{isWhisperDownloading ? (
<div className="whisper-progress">
<div className="progress-bar">
<div className="progress-fill" style={{ width: `${whisperDownloadProgress}%` }} />
<div className="progress-info">
<span>...</span>
<span className="percent">{whisperDownloadProgress.toFixed(0)}%</span>
</div>
<div className="progress-bar-container">
<div className="progress-bar">
<div className="progress-fill" style={{ width: `${whisperDownloadProgress}%` }} />
</div>
</div>
<span>{whisperDownloadProgress.toFixed(0)}%</span>
</div>
) : (
<button className="btn btn-primary" onClick={handleDownloadWhisperModel}>
<Download size={16} />
<button className="btn btn-primary btn-download-model" onClick={handleDownloadWhisperModel}>
<Download size={18} />
</button>
)}
</div>

View File

@@ -95,6 +95,7 @@ export interface ElectronAPI {
}>
getImageData: (sessionId: string, msgId: string) => Promise<{ success: boolean; data?: string; error?: string }>
getVoiceData: (sessionId: string, msgId: string, createTime?: number, serverId?: string | number) => Promise<{ success: boolean; data?: string; error?: string }>
resolveVoiceCache: (sessionId: string, msgId: string) => Promise<{ success: boolean; hasCache: boolean; data?: string }>
getVoiceTranscript: (sessionId: string, msgId: string) => Promise<{ success: boolean; transcript?: string; error?: string }>
onVoiceTranscriptPartial: (callback: (payload: { msgId: string; text: string }) => void) => () => void
}