mirror of
https://github.com/zhayujie/chatgpt-on-wechat.git
synced 2026-05-15 16:58:54 +08:00
Compare commits
45 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| 7167310ccd | |||
| 263667a2d4 | |||
| d5cef291f6 | |||
| c8d166e833 | |||
| 6e25782d8b | |||
| c3127f7e84 | |||
| e8bc173cd7 | |||
| 4d1cdf5207 | |||
| 57a473364e | |||
| 40b62e9d38 | |||
| ead5f9926b | |||
| 814b6753c2 | |||
| ce505251f8 | |||
| 5d2a987aaa | |||
| 4d67e08723 | |||
| 2e71dd5fe2 | |||
| c3b9643227 | |||
| 0aad5dc2b7 | |||
| cec900168f | |||
| f9b1c403d5 | |||
| 9024b602f5 | |||
| c139fd9a57 | |||
| e299b68163 | |||
| 7777a53a82 | |||
| 3e185dbbfe | |||
| e8a32af369 | |||
| 7b0ec6687e | |||
| ec1c6c7b92 | |||
| 8dfaa86760 | |||
| 323aebd1be | |||
| 436c038a2f | |||
| ccd50ec6c0 | |||
| a7541c2c0f | |||
| c3a57d756c | |||
| aa300a4c98 | |||
| 83ea7352b9 | |||
| 9050712cd8 | |||
| 8d92fdbb6e | |||
| a2442ec1b9 | |||
| 71662c9cd9 | |||
| 54ff5dbcc2 | |||
| 4ab7bd3b51 | |||
| ef3c61a297 | |||
| abf79bf60c | |||
| 5d3cecd926 |
@@ -14,6 +14,7 @@ tmp
|
||||
plugins.json
|
||||
itchat.pkl
|
||||
*.log
|
||||
logs/
|
||||
user_datas.pkl
|
||||
chatgpt_tool_hub/
|
||||
plugins/**/
|
||||
|
||||
@@ -1,11 +1,19 @@
|
||||
# 简介
|
||||
<p align="center"><img src= "https://github.com/user-attachments/assets/31fb4eab-3be4-477d-aa76-82cf62bfd12c" alt="Chatgpt-on-Wechat" width="600" /></p>
|
||||
|
||||
> chatgpt-on-wechat(简称CoW)项目是基于大模型的智能对话机器人,支持微信公众号、企业微信应用、飞书、钉钉接入,可选择GPT3.5/GPT4.0/Claude/Gemini/LinkAI/ChatGLM/KIMI/文心一言/讯飞星火/通义千问/LinkAI,能处理文本、语音和图片,通过插件访问操作系统和互联网等外部资源,支持基于自有知识库定制企业AI应用。
|
||||
<p align="center">
|
||||
<a href="https://github.com/zhayujie/chatgpt-on-wechat/releases/latest"><img src="https://img.shields.io/github/v/release/zhayujie/chatgpt-on-wechat" alt="Latest release"></a>
|
||||
<a href="https://github.com/zhayujie/chatgpt-on-wechat/blob/master/LICENSE"><img src="https://img.shields.io/github/license/zhayujie/chatgpt-on-wechat" alt="License: MIT"></a>
|
||||
<a href="https://github.com/zhayujie/chatgpt-on-wechat"><img src="https://img.shields.io/github/stars/zhayujie/chatgpt-on-wechat?style=flat-square" alt="Stars"></a> <br/>
|
||||
</p>
|
||||
|
||||
chatgpt-on-wechat(简称CoW)项目是基于大模型的智能对话机器人,支持微信公众号、企业微信应用、飞书、钉钉接入,可选择GPT3.5/GPT4.0/Claude/Gemini/LinkAI/ChatGLM/KIMI/文心一言/讯飞星火/通义千问/LinkAI/ModelScope,能处理文本、语音和图片,通过插件访问操作系统和互联网等外部资源,支持基于自有知识库定制企业AI应用。
|
||||
|
||||
# 简介
|
||||
|
||||
最新版本支持的功能如下:
|
||||
|
||||
- ✅ **多端部署:** 有多种部署方式可选择且功能完备,目前已支持微信公众号、企业微信应用、飞书、钉钉等部署方式
|
||||
- ✅ **基础对话:** 私聊及群聊的消息智能回复,支持多轮会话上下文记忆,支持 GPT-3.5, GPT-4o-mini, GPT-4o, GPT-4, Claude-3.5, Gemini, 文心一言, 讯飞星火, 通义千问,ChatGLM-4,Kimi(月之暗面), MiniMax
|
||||
- ✅ **基础对话:** 私聊及群聊的消息智能回复,支持多轮会话上下文记忆,支持 GPT-3.5, GPT-4o-mini, GPT-4o, GPT-4, Claude-3.5, Gemini, 文心一言, 讯飞星火, 通义千问,ChatGLM-4,Kimi(月之暗面), MiniMax, GiteeAI, ModelScope(魔搭社区)
|
||||
- ✅ **语音能力:** 可识别语音消息,通过文字或语音回复,支持 azure, baidu, google, openai(whisper/tts) 等多种语音模型
|
||||
- ✅ **图像能力:** 支持图片生成、图片识别、图生图(如照片修复),可选择 Dall-E-3, stable diffusion, replicate, midjourney, CogView-3, vision模型
|
||||
- ✅ **丰富插件:** 支持个性化插件扩展,已实现多角色切换、文字冒险、敏感词过滤、聊天记录总结、文档总结和对话、联网搜索等插件
|
||||
@@ -45,6 +53,10 @@ DEMO视频:https://cdn.link-ai.tech/doc/cow_demo.mp4
|
||||
<br>
|
||||
|
||||
# 🏷 更新日志
|
||||
>**2025.04.11:** [1.7.5版本](https://github.com/zhayujie/chatgpt-on-wechat/releases/tag/1.7.5) 新增支持 [wechatferry](https://github.com/zhayujie/chatgpt-on-wechat/pull/2562) 协议、新增 deepseek 模型、新增支持腾讯云语音能力、新增支持 ModelScope 和 Gitee-AI API接口
|
||||
|
||||
>**2024.12.13:** [1.7.4版本](https://github.com/zhayujie/chatgpt-on-wechat/releases/tag/1.7.4) 新增 Gemini 2.0 模型、新增web channel、解决内存泄漏问题、解决 `#reloadp` 命令重载不生效问题
|
||||
|
||||
>**2024.10.31:** [1.7.3版本](https://github.com/zhayujie/chatgpt-on-wechat/releases/tag/1.7.3) 程序稳定性提升、数据库功能、Claude模型优化、linkai插件优化、离线通知
|
||||
|
||||
>**2024.09.26:** [1.7.2版本](https://github.com/zhayujie/chatgpt-on-wechat/releases/tag/1.7.2) 和 [1.7.1版本](https://github.com/zhayujie/chatgpt-on-wechat/releases/tag/1.7.1) 文心,讯飞等模型优化、o1 模型、快速安装和管理脚本
|
||||
|
||||
@@ -68,5 +68,9 @@ def create_bot(bot_type):
|
||||
from bot.minimax.minimax_bot import MinimaxBot
|
||||
return MinimaxBot()
|
||||
|
||||
elif bot_type == const.MODELSCOPE:
|
||||
from bot.modelscope.modelscope_bot import ModelScopeBot
|
||||
return ModelScopeBot()
|
||||
|
||||
|
||||
raise RuntimeError
|
||||
|
||||
@@ -83,7 +83,7 @@ def num_tokens_from_messages(messages, model):
|
||||
tokens_per_message = 3
|
||||
tokens_per_name = 1
|
||||
else:
|
||||
logger.warn(f"num_tokens_from_messages() is not implemented for model {model}. Returning num tokens assuming gpt-3.5-turbo.")
|
||||
logger.debug(f"num_tokens_from_messages() is not implemented for model {model}. Returning num tokens assuming gpt-3.5-turbo.")
|
||||
return num_tokens_from_messages(messages, model="gpt-3.5-turbo")
|
||||
num_tokens = 0
|
||||
for message in messages:
|
||||
|
||||
@@ -147,9 +147,9 @@ class LinkAIBot(Bot):
|
||||
if response["choices"][0].get("img_urls"):
|
||||
thread = threading.Thread(target=self._send_image, args=(context.get("channel"), context, response["choices"][0].get("img_urls")))
|
||||
thread.start()
|
||||
if response["choices"][0].get("text_content"):
|
||||
reply_content = response["choices"][0].get("text_content")
|
||||
reply_content = self._process_url(reply_content)
|
||||
reply_content = response["choices"][0].get("text_content")
|
||||
if reply_content:
|
||||
reply_content = self._process_url(reply_content)
|
||||
return Reply(ReplyType.TEXT, reply_content)
|
||||
|
||||
else:
|
||||
|
||||
@@ -0,0 +1,277 @@
|
||||
# encoding:utf-8
|
||||
|
||||
import time
|
||||
import json
|
||||
import openai
|
||||
import openai.error
|
||||
from bot.bot import Bot
|
||||
from bot.session_manager import SessionManager
|
||||
from bridge.context import ContextType
|
||||
from bridge.reply import Reply, ReplyType
|
||||
from common.log import logger
|
||||
from config import conf, load_config
|
||||
from .modelscope_session import ModelScopeSession
|
||||
import requests
|
||||
|
||||
|
||||
# ModelScope对话模型API
|
||||
class ModelScopeBot(Bot):
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
self.sessions = SessionManager(ModelScopeSession, model=conf().get("model") or "Qwen/Qwen2.5-7B-Instruct")
|
||||
model = conf().get("model") or "Qwen/Qwen2.5-7B-Instruct"
|
||||
if model == "modelscope":
|
||||
model = "Qwen/Qwen2.5-7B-Instruct"
|
||||
self.args = {
|
||||
"model": model, # 对话模型的名称
|
||||
"temperature": conf().get("temperature", 0.3), # 如果设置,值域须为 [0, 1] 我们推荐 0.3,以达到较合适的效果。
|
||||
"top_p": conf().get("top_p", 1.0), # 使用默认值
|
||||
}
|
||||
self.api_key = conf().get("modelscope_api_key")
|
||||
self.base_url = conf().get("modelscope_base_url", "https://api-inference.modelscope.cn/v1/chat/completions")
|
||||
"""
|
||||
需要获取ModelScope支持API-inference的模型名称列表,请到魔搭社区官网模型中心查看 https://modelscope.cn/models?filter=inference_type&page=1。
|
||||
或者使用命令 curl https://api-inference.modelscope.cn/v1/models 对模型列表和ID进行获取。查看commend/const.py文件也可以获取模型列表。
|
||||
获取ModelScope的免费API Key,请到魔搭社区官网用户中心查看获取方式 https://modelscope.cn/docs/model-service/API-Inference/intro。
|
||||
"""
|
||||
def reply(self, query, context=None):
|
||||
# acquire reply content
|
||||
if context.type == ContextType.TEXT:
|
||||
logger.info("[MODELSCOPE_AI] query={}".format(query))
|
||||
|
||||
session_id = context["session_id"]
|
||||
reply = None
|
||||
clear_memory_commands = conf().get("clear_memory_commands", ["#清除记忆"])
|
||||
if query in clear_memory_commands:
|
||||
self.sessions.clear_session(session_id)
|
||||
reply = Reply(ReplyType.INFO, "记忆已清除")
|
||||
elif query == "#清除所有":
|
||||
self.sessions.clear_all_session()
|
||||
reply = Reply(ReplyType.INFO, "所有人记忆已清除")
|
||||
elif query == "#更新配置":
|
||||
load_config()
|
||||
reply = Reply(ReplyType.INFO, "配置已更新")
|
||||
if reply:
|
||||
return reply
|
||||
session = self.sessions.session_query(query, session_id)
|
||||
logger.debug("[MODELSCOPE_AI] session query={}".format(session.messages))
|
||||
|
||||
model = context.get("modelscope_model")
|
||||
new_args = self.args.copy()
|
||||
if model:
|
||||
new_args["model"] = model
|
||||
|
||||
if new_args["model"] == "Qwen/QwQ-32B":
|
||||
reply_content = self.reply_text_stream(session, args=new_args)
|
||||
else:
|
||||
reply_content = self.reply_text(session, args=new_args)
|
||||
|
||||
logger.debug(
|
||||
"[MODELSCOPE_AI] new_query={}, session_id={}, reply_cont={}, completion_tokens={}".format(
|
||||
session.messages,
|
||||
session_id,
|
||||
reply_content["content"],
|
||||
reply_content["completion_tokens"],
|
||||
)
|
||||
)
|
||||
if reply_content["completion_tokens"] == 0 and len(reply_content["content"]) > 0:
|
||||
# 只有当 content 为空且 completion_tokens 为 0 时才标记为错误
|
||||
if len(reply_content["content"]) == 0:
|
||||
reply = Reply(ReplyType.ERROR, reply_content["content"])
|
||||
else:
|
||||
reply = Reply(ReplyType.TEXT, reply_content["content"])
|
||||
elif reply_content["completion_tokens"] > 0:
|
||||
self.sessions.session_reply(reply_content["content"], session_id, reply_content["total_tokens"])
|
||||
reply = Reply(ReplyType.TEXT, reply_content["content"])
|
||||
else:
|
||||
reply = Reply(ReplyType.ERROR, reply_content["content"])
|
||||
logger.debug("[MODELSCOPE_AI] reply {} used 0 tokens.".format(reply_content))
|
||||
return reply
|
||||
elif context.type == ContextType.IMAGE_CREATE:
|
||||
ok, retstring = self.create_img(query, 0)
|
||||
reply = None
|
||||
if ok:
|
||||
reply = Reply(ReplyType.IMAGE_URL, retstring)
|
||||
else:
|
||||
reply = Reply(ReplyType.ERROR, retstring)
|
||||
return reply
|
||||
else:
|
||||
reply = Reply(ReplyType.ERROR, "Bot不支持处理{}类型的消息".format(context.type))
|
||||
return reply
|
||||
|
||||
def reply_text(self, session: ModelScopeSession, args=None, retry_count=0) -> dict:
|
||||
"""
|
||||
call openai's ChatCompletion to get the answer
|
||||
:param session: a conversation session
|
||||
:param session_id: session id
|
||||
:param retry_count: retry count
|
||||
:return: {}
|
||||
"""
|
||||
try:
|
||||
headers = {
|
||||
"Content-Type": "application/json",
|
||||
"Authorization": "Bearer " + self.api_key
|
||||
}
|
||||
|
||||
body = args
|
||||
body["messages"] = session.messages
|
||||
res = requests.post(
|
||||
self.base_url,
|
||||
headers=headers,
|
||||
data=json.dumps(body)
|
||||
)
|
||||
|
||||
if res.status_code == 200:
|
||||
response = res.json()
|
||||
return {
|
||||
"total_tokens": response["usage"]["total_tokens"],
|
||||
"completion_tokens": response["usage"]["completion_tokens"],
|
||||
"content": response["choices"][0]["message"]["content"]
|
||||
}
|
||||
else:
|
||||
response = res.json()
|
||||
if "errors" in response:
|
||||
error = response.get("errors")
|
||||
elif "error" in response:
|
||||
error = response.get("error")
|
||||
else:
|
||||
error = "Unknown error"
|
||||
logger.error(f"[MODELSCOPE_AI] chat failed, status_code={res.status_code}, "
|
||||
f"msg={error.get('message')}, type={error.get('type')}")
|
||||
|
||||
result = {"completion_tokens": 0, "content": "提问太快啦,请休息一下再问我吧"}
|
||||
need_retry = False
|
||||
if res.status_code >= 500:
|
||||
# server error, need retry
|
||||
logger.warn(f"[MODELSCOPE_AI] do retry, times={retry_count}")
|
||||
need_retry = retry_count < 2
|
||||
elif res.status_code == 401:
|
||||
result["content"] = "授权失败,请检查API Key是否正确"
|
||||
elif res.status_code == 429:
|
||||
result["content"] = "请求过于频繁,请稍后再试"
|
||||
need_retry = retry_count < 2
|
||||
else:
|
||||
need_retry = False
|
||||
|
||||
if need_retry:
|
||||
time.sleep(3)
|
||||
return self.reply_text(session, args, retry_count + 1)
|
||||
else:
|
||||
return result
|
||||
except Exception as e:
|
||||
logger.exception(e)
|
||||
need_retry = retry_count < 2
|
||||
result = {"completion_tokens": 0, "content": "我现在有点累了,等会再来吧"}
|
||||
if need_retry:
|
||||
return self.reply_text(session, args, retry_count + 1)
|
||||
else:
|
||||
return result
|
||||
|
||||
def reply_text_stream(self, session: ModelScopeSession, args=None, retry_count=0) -> dict:
|
||||
"""
|
||||
call ModelScope's ChatCompletion to get the answer with stream response
|
||||
:param session: a conversation session
|
||||
:param session_id: session id
|
||||
:param retry_count: retry count
|
||||
:return: {}
|
||||
"""
|
||||
try:
|
||||
headers = {
|
||||
"Content-Type": "application/json",
|
||||
"Authorization": "Bearer " + self.api_key
|
||||
}
|
||||
|
||||
body = args
|
||||
body["messages"] = session.messages
|
||||
body["stream"] = True # 启用流式响应
|
||||
|
||||
res = requests.post(
|
||||
self.base_url,
|
||||
headers=headers,
|
||||
data=json.dumps(body),
|
||||
stream=True
|
||||
)
|
||||
if res.status_code == 200:
|
||||
content = ""
|
||||
for line in res.iter_lines():
|
||||
if line:
|
||||
decoded_line = line.decode('utf-8')
|
||||
if decoded_line.startswith("data: "):
|
||||
try:
|
||||
json_data = json.loads(decoded_line[6:])
|
||||
delta_content = json_data.get("choices", [{}])[0].get("delta", {}).get("content", "")
|
||||
if delta_content:
|
||||
content += delta_content
|
||||
except json.JSONDecodeError as e:
|
||||
pass
|
||||
return {
|
||||
"total_tokens": 1, # 流式响应通常不返回token使用情况
|
||||
"completion_tokens": 1,
|
||||
"content": content
|
||||
}
|
||||
else:
|
||||
response = res.json()
|
||||
if "errors" in response:
|
||||
error = response.get("errors")
|
||||
elif "error" in response:
|
||||
error = response.get("error")
|
||||
else:
|
||||
error = "Unknown error"
|
||||
logger.error(f"[MODELSCOPE_AI] chat failed, status_code={res.status_code}, "
|
||||
f"msg={error.get('message')}, type={error.get('type')}")
|
||||
|
||||
result = {"completion_tokens": 0, "content": "提问太快啦,请休息一下再问我吧"}
|
||||
need_retry = False
|
||||
if res.status_code >= 500:
|
||||
# server error, need retry
|
||||
logger.warn(f"[MODELSCOPE_AI] do retry, times={retry_count}")
|
||||
need_retry = retry_count < 2
|
||||
elif res.status_code == 401:
|
||||
result["content"] = "授权失败,请检查API Key是否正确"
|
||||
elif res.status_code == 429:
|
||||
result["content"] = "请求过于频繁,请稍后再试"
|
||||
need_retry = retry_count < 2
|
||||
else:
|
||||
need_retry = False
|
||||
|
||||
if need_retry:
|
||||
time.sleep(3)
|
||||
return self.reply_text_stream(session, args, retry_count + 1)
|
||||
else:
|
||||
return result
|
||||
except Exception as e:
|
||||
logger.exception(e)
|
||||
need_retry = retry_count < 2
|
||||
result = {"completion_tokens": 0, "content": "我现在有点累了,等会再来吧"}
|
||||
if need_retry:
|
||||
return self.reply_text_stream(session, args, retry_count + 1)
|
||||
else:
|
||||
return result
|
||||
def create_img(self, query, retry_count=0):
|
||||
try:
|
||||
logger.info("[ModelScopeImage] image_query={}".format(query))
|
||||
headers = {
|
||||
"Content-Type": "application/json; charset=utf-8", # 明确指定编码
|
||||
"Authorization": f"Bearer {self.api_key}"
|
||||
}
|
||||
payload = {
|
||||
"prompt": query, # required
|
||||
"n": 1,
|
||||
"model": conf().get("text_to_image"),
|
||||
}
|
||||
url = "https://api-inference.modelscope.cn/v1/images/generations"
|
||||
|
||||
# 手动序列化并保留中文(禁用 ASCII 转义)
|
||||
json_payload = json.dumps(payload, ensure_ascii=False).encode('utf-8')
|
||||
|
||||
# 使用 data 参数发送原始字符串(requests 会自动处理编码)
|
||||
res = requests.post(url, headers=headers, data=json_payload)
|
||||
|
||||
response_data = res.json()
|
||||
image_url = response_data['images'][0]['url']
|
||||
logger.info("[ModelScopeImage] image_url={}".format(image_url))
|
||||
return True, image_url
|
||||
|
||||
except Exception as e:
|
||||
logger.error(format(e))
|
||||
return False, "画图出现问题,请休息一下再问我吧"
|
||||
@@ -0,0 +1,51 @@
|
||||
from bot.session_manager import Session
|
||||
from common.log import logger
|
||||
|
||||
|
||||
class ModelScopeSession(Session):
|
||||
def __init__(self, session_id, system_prompt=None, model="Qwen/Qwen2.5-7B-Instruct"):
|
||||
super().__init__(session_id, system_prompt)
|
||||
self.model = model
|
||||
self.reset()
|
||||
|
||||
def discard_exceeding(self, max_tokens, cur_tokens=None):
|
||||
precise = True
|
||||
try:
|
||||
cur_tokens = self.calc_tokens()
|
||||
except Exception as e:
|
||||
precise = False
|
||||
if cur_tokens is None:
|
||||
raise e
|
||||
logger.debug("Exception when counting tokens precisely for query: {}".format(e))
|
||||
while cur_tokens > max_tokens:
|
||||
if len(self.messages) > 2:
|
||||
self.messages.pop(1)
|
||||
elif len(self.messages) == 2 and self.messages[1]["role"] == "assistant":
|
||||
self.messages.pop(1)
|
||||
if precise:
|
||||
cur_tokens = self.calc_tokens()
|
||||
else:
|
||||
cur_tokens = cur_tokens - max_tokens
|
||||
break
|
||||
elif len(self.messages) == 2 and self.messages[1]["role"] == "user":
|
||||
logger.warn("user message exceed max_tokens. total_tokens={}".format(cur_tokens))
|
||||
break
|
||||
else:
|
||||
logger.debug("max_tokens={}, total_tokens={}, len(messages)={}".format(max_tokens, cur_tokens,
|
||||
len(self.messages)))
|
||||
break
|
||||
if precise:
|
||||
cur_tokens = self.calc_tokens()
|
||||
else:
|
||||
cur_tokens = cur_tokens - max_tokens
|
||||
return cur_tokens
|
||||
|
||||
def calc_tokens(self):
|
||||
return num_tokens_from_messages(self.messages, self.model)
|
||||
|
||||
|
||||
def num_tokens_from_messages(messages, model):
|
||||
tokens = 0
|
||||
for msg in messages:
|
||||
tokens += len(msg["content"])
|
||||
return tokens
|
||||
@@ -41,7 +41,7 @@ class XunFeiBot(Bot):
|
||||
self.api_key = conf().get("xunfei_api_key")
|
||||
self.api_secret = conf().get("xunfei_api_secret")
|
||||
# 默认使用v2.0版本: "generalv2"
|
||||
# Spark Lite请求地址(spark_url): wss://spark-api.xf-yun.com/v1.1/chat, 对应的domain参数为: "general"
|
||||
# Spark Lite请求地址(spark_url): wss://spark-api.xf-yun.com/v1.1/chat, 对应的domain参数为: "lite"
|
||||
# Spark V2.0请求地址(spark_url): wss://spark-api.xf-yun.com/v2.1/chat, 对应的domain参数为: "generalv2"
|
||||
# Spark Pro 请求地址(spark_url): wss://spark-api.xf-yun.com/v3.1/chat, 对应的domain参数为: "generalv3"
|
||||
# Spark Pro-128K请求地址(spark_url): wss://spark-api.xf-yun.com/chat/pro-128k, 对应的domain参数为: "pro-128k"
|
||||
|
||||
@@ -49,6 +49,9 @@ class Bridge(object):
|
||||
if model_type in [const.MOONSHOT, "moonshot-v1-8k", "moonshot-v1-32k", "moonshot-v1-128k"]:
|
||||
self.btype["chat"] = const.MOONSHOT
|
||||
|
||||
if model_type in [const.MODELSCOPE]:
|
||||
self.btype["chat"] = const.MODELSCOPE
|
||||
|
||||
if model_type in ["abab6.5-chat"]:
|
||||
self.btype["chat"] = const.MiniMax
|
||||
|
||||
|
||||
@@ -18,6 +18,9 @@ def create_channel(channel_type) -> Channel:
|
||||
elif channel_type == "wxy":
|
||||
from channel.wechat.wechaty_channel import WechatyChannel
|
||||
ch = WechatyChannel()
|
||||
elif channel_type == "wcf":
|
||||
from channel.wechat.wcf_channel import WechatfChannel
|
||||
ch = WechatfChannel()
|
||||
elif channel_type == "terminal":
|
||||
from channel.terminal.terminal_channel import TerminalChannel
|
||||
ch = TerminalChannel()
|
||||
|
||||
@@ -3,5 +3,5 @@
|
||||
|
||||
#使用方法
|
||||
- 在配置文件中channel_type填入web即可
|
||||
- 访问地址 http://localhost:9899
|
||||
- 访问地址 http://localhost:9899/chat
|
||||
- port可以在配置项 web_port中设置
|
||||
|
||||
@@ -177,7 +177,7 @@ class WebChannel(ChatChannel):
|
||||
|
||||
def startup(self):
|
||||
logger.setLevel("WARN")
|
||||
print("\nWeb Channel is running. Send POST requests to /message to send messages.")
|
||||
print("\nWeb Channel is running, please visit http://localhost:9899/chat")
|
||||
|
||||
urls = (
|
||||
'/sse/(.+)', 'SSEHandler', # 修改路由以接收用户ID
|
||||
|
||||
@@ -0,0 +1,179 @@
|
||||
# encoding:utf-8
|
||||
|
||||
"""
|
||||
wechat channel
|
||||
"""
|
||||
|
||||
import io
|
||||
import json
|
||||
import os
|
||||
import threading
|
||||
import time
|
||||
from queue import Empty
|
||||
from typing import Any
|
||||
|
||||
from bridge.context import *
|
||||
from bridge.reply import *
|
||||
from channel.chat_channel import ChatChannel
|
||||
from channel.wechat.wcf_message import WechatfMessage
|
||||
from common.log import logger
|
||||
from common.singleton import singleton
|
||||
from common.utils import *
|
||||
from config import conf, get_appdata_dir
|
||||
from wcferry import Wcf, WxMsg
|
||||
|
||||
|
||||
@singleton
|
||||
class WechatfChannel(ChatChannel):
|
||||
NOT_SUPPORT_REPLYTYPE = []
|
||||
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
self.NOT_SUPPORT_REPLYTYPE = []
|
||||
# 使用字典存储最近消息,用于去重
|
||||
self.received_msgs = {}
|
||||
# 初始化wcferry客户端
|
||||
self.wcf = Wcf()
|
||||
self.wxid = None # 登录后会被设置为当前登录用户的wxid
|
||||
|
||||
def startup(self):
|
||||
"""
|
||||
启动通道
|
||||
"""
|
||||
try:
|
||||
# wcferry会自动唤起微信并登录
|
||||
self.wxid = self.wcf.get_self_wxid()
|
||||
self.name = self.wcf.get_user_info().get("name")
|
||||
logger.info(f"微信登录成功,当前用户ID: {self.wxid}, 用户名:{self.name}")
|
||||
self.contact_cache = ContactCache(self.wcf)
|
||||
self.contact_cache.update()
|
||||
# 启动消息接收
|
||||
self.wcf.enable_receiving_msg()
|
||||
# 创建消息处理线程
|
||||
t = threading.Thread(target=self._process_messages, name="WeChatThread", daemon=True)
|
||||
t.start()
|
||||
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"微信通道启动失败: {e}")
|
||||
raise e
|
||||
|
||||
def _process_messages(self):
|
||||
"""
|
||||
处理消息队列
|
||||
"""
|
||||
while True:
|
||||
try:
|
||||
msg = self.wcf.get_msg()
|
||||
if msg:
|
||||
self._handle_message(msg)
|
||||
except Empty:
|
||||
continue
|
||||
except Exception as e:
|
||||
logger.error(f"处理消息失败: {e}")
|
||||
continue
|
||||
|
||||
def _handle_message(self, msg: WxMsg):
|
||||
"""
|
||||
处理单条消息
|
||||
"""
|
||||
try:
|
||||
# 构造消息对象
|
||||
cmsg = WechatfMessage(self, msg)
|
||||
# 消息去重
|
||||
if cmsg.msg_id in self.received_msgs:
|
||||
return
|
||||
self.received_msgs[cmsg.msg_id] = time.time()
|
||||
# 清理过期消息ID
|
||||
self._clean_expired_msgs()
|
||||
|
||||
logger.debug(f"收到消息: {msg}")
|
||||
context = self._compose_context(cmsg.ctype, cmsg.content,
|
||||
isgroup=cmsg.is_group,
|
||||
msg=cmsg)
|
||||
if context:
|
||||
self.produce(context)
|
||||
except Exception as e:
|
||||
logger.error(f"处理消息失败: {e}")
|
||||
|
||||
def _clean_expired_msgs(self, expire_time: float = 60):
|
||||
"""
|
||||
清理过期的消息ID
|
||||
"""
|
||||
now = time.time()
|
||||
for msg_id in list(self.received_msgs.keys()):
|
||||
if now - self.received_msgs[msg_id] > expire_time:
|
||||
del self.received_msgs[msg_id]
|
||||
|
||||
def send(self, reply: Reply, context: Context):
|
||||
"""
|
||||
发送消息
|
||||
"""
|
||||
receiver = context["receiver"]
|
||||
if not receiver:
|
||||
logger.error("receiver is empty")
|
||||
return
|
||||
|
||||
try:
|
||||
if reply.type == ReplyType.TEXT:
|
||||
# 处理@信息
|
||||
at_list = []
|
||||
if context.get("isgroup"):
|
||||
if context["msg"].actual_user_id:
|
||||
at_list = [context["msg"].actual_user_id]
|
||||
at_str = ",".join(at_list) if at_list else ""
|
||||
self.wcf.send_text(reply.content, receiver, at_str)
|
||||
|
||||
elif reply.type == ReplyType.ERROR or reply.type == ReplyType.INFO:
|
||||
self.wcf.send_text(reply.content, receiver)
|
||||
else:
|
||||
logger.error(f"暂不支持的消息类型: {reply.type}")
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"发送消息失败: {e}")
|
||||
|
||||
def close(self):
|
||||
"""
|
||||
关闭通道
|
||||
"""
|
||||
try:
|
||||
self.wcf.cleanup()
|
||||
except Exception as e:
|
||||
logger.error(f"关闭通道失败: {e}")
|
||||
|
||||
|
||||
class ContactCache:
|
||||
def __init__(self, wcf):
|
||||
"""
|
||||
wcf: 一个 wcfferry.client.Wcf 实例
|
||||
"""
|
||||
self.wcf = wcf
|
||||
self._contact_map = {} # 形如 {wxid: {完整联系人信息}}
|
||||
|
||||
def update(self):
|
||||
"""
|
||||
更新缓存:调用 get_contacts(),
|
||||
再把 wcf.contacts 构建成 {wxid: {完整信息}} 的字典
|
||||
"""
|
||||
self.wcf.get_contacts()
|
||||
self._contact_map.clear()
|
||||
for item in self.wcf.contacts:
|
||||
wxid = item.get('wxid')
|
||||
if wxid: # 确保有 wxid 字段
|
||||
self._contact_map[wxid] = item
|
||||
|
||||
def get_contact(self, wxid: str) -> dict:
|
||||
"""
|
||||
返回该 wxid 对应的完整联系人 dict,
|
||||
如果没找到就返回 None
|
||||
"""
|
||||
return self._contact_map.get(wxid)
|
||||
|
||||
def get_name_by_wxid(self, wxid: str) -> str:
|
||||
"""
|
||||
通过wxid,获取成员/群名称
|
||||
"""
|
||||
contact = self.get_contact(wxid)
|
||||
if contact:
|
||||
return contact.get('name', '')
|
||||
return ''
|
||||
@@ -0,0 +1,58 @@
|
||||
# encoding:utf-8
|
||||
|
||||
"""
|
||||
wechat channel message
|
||||
"""
|
||||
|
||||
from bridge.context import ContextType
|
||||
from channel.chat_message import ChatMessage
|
||||
from common.log import logger
|
||||
from wcferry import WxMsg
|
||||
|
||||
|
||||
class WechatfMessage(ChatMessage):
|
||||
"""
|
||||
微信消息封装类
|
||||
"""
|
||||
|
||||
def __init__(self, channel, wcf_msg: WxMsg, is_group=False):
|
||||
"""
|
||||
初始化消息对象
|
||||
:param wcf_msg: wcferry消息对象
|
||||
:param is_group: 是否是群消息
|
||||
"""
|
||||
super().__init__(wcf_msg)
|
||||
self.msg_id = wcf_msg.id
|
||||
self.create_time = wcf_msg.ts # 使用消息时间戳
|
||||
self.is_group = is_group or wcf_msg._is_group
|
||||
self.wxid = channel.wxid
|
||||
self.name = channel.name
|
||||
|
||||
# 解析消息类型
|
||||
if wcf_msg.is_text():
|
||||
self.ctype = ContextType.TEXT
|
||||
self.content = wcf_msg.content
|
||||
else:
|
||||
raise NotImplementedError(f"Unsupported message type: {wcf_msg.type}")
|
||||
|
||||
# 设置发送者和接收者信息
|
||||
self.from_user_id = self.wxid if wcf_msg.sender == self.wxid else wcf_msg.sender
|
||||
self.from_user_nickname = self.name if wcf_msg.sender == self.wxid else channel.contact_cache.get_name_by_wxid(wcf_msg.sender)
|
||||
self.to_user_id = self.wxid
|
||||
self.to_user_nickname = self.name
|
||||
self.other_user_id = wcf_msg.sender
|
||||
self.other_user_nickname = channel.contact_cache.get_name_by_wxid(wcf_msg.sender)
|
||||
|
||||
# 群消息特殊处理
|
||||
if self.is_group:
|
||||
self.other_user_id = wcf_msg.roomid
|
||||
self.other_user_nickname = channel.contact_cache.get_name_by_wxid(wcf_msg.roomid)
|
||||
self.actual_user_id = wcf_msg.sender
|
||||
self.actual_user_nickname = channel.wcf.get_alias_in_chatroom(wcf_msg.sender, wcf_msg.roomid)
|
||||
if not self.actual_user_nickname: # 群聊获取不到企微号成员昵称,这里尝试从联系人缓存去获取
|
||||
self.actual_user_nickname = channel.contact_cache.get_name_by_wxid(wcf_msg.sender)
|
||||
self.room_id = wcf_msg.roomid
|
||||
self.is_at = wcf_msg.is_at(self.wxid) # 是否被@当前登录用户
|
||||
|
||||
# 判断是否是自己发送的消息
|
||||
self.my_msg = wcf_msg.from_self()
|
||||
@@ -117,23 +117,35 @@ class WechatChannel(ChatChannel):
|
||||
|
||||
def startup(self):
|
||||
try:
|
||||
itchat.instance.receivingRetryCount = 600 # 修改断线超时时间
|
||||
# login by scan QRCode
|
||||
hotReload = conf().get("hot_reload", False)
|
||||
status_path = os.path.join(get_appdata_dir(), "itchat.pkl")
|
||||
itchat.auto_login(
|
||||
enableCmdQR=2,
|
||||
hotReload=hotReload,
|
||||
statusStorageDir=status_path,
|
||||
qrCallback=qrCallback,
|
||||
exitCallback=self.exitCallback,
|
||||
loginCallback=self.loginCallback
|
||||
)
|
||||
self.user_id = itchat.instance.storageClass.userName
|
||||
self.name = itchat.instance.storageClass.nickName
|
||||
logger.info("Wechat login success, user_id: {}, nickname: {}".format(self.user_id, self.name))
|
||||
# start message listener
|
||||
itchat.run()
|
||||
time.sleep(3)
|
||||
logger.error("""[WechatChannel] 当前channel暂不可用,目前支持的channel有:
|
||||
1. terminal: 终端
|
||||
2. wechatmp: 个人公众号
|
||||
3. wechatmp_service: 企业公众号
|
||||
4. wechatcom_app: 企微自建应用
|
||||
5. dingtalk: 钉钉
|
||||
6. feishu: 飞书
|
||||
7. web: 网页
|
||||
8. wcf: wechat (需Windows环境,参考 https://github.com/zhayujie/chatgpt-on-wechat/pull/2562 )
|
||||
可修改 config.json 配置文件的 channel_type 字段进行切换""")
|
||||
|
||||
# itchat.instance.receivingRetryCount = 600 # 修改断线超时时间
|
||||
# # login by scan QRCode
|
||||
# hotReload = conf().get("hot_reload", False)
|
||||
# status_path = os.path.join(get_appdata_dir(), "itchat.pkl")
|
||||
# itchat.auto_login(
|
||||
# enableCmdQR=2,
|
||||
# hotReload=hotReload,
|
||||
# statusStorageDir=status_path,
|
||||
# qrCallback=qrCallback,
|
||||
# exitCallback=self.exitCallback,
|
||||
# loginCallback=self.loginCallback
|
||||
# )
|
||||
# self.user_id = itchat.instance.storageClass.userName
|
||||
# self.name = itchat.instance.storageClass.nickName
|
||||
# logger.info("Wechat login success, user_id: {}, nickname: {}".format(self.user_id, self.name))
|
||||
# # start message listener
|
||||
# itchat.run()
|
||||
except Exception as e:
|
||||
logger.exception(e)
|
||||
|
||||
|
||||
+16
-4
@@ -15,7 +15,7 @@ GEMINI = "gemini" # gemini-1.0-pro
|
||||
ZHIPU_AI = "glm-4"
|
||||
MOONSHOT = "moonshot"
|
||||
MiniMax = "minimax"
|
||||
|
||||
MODELSCOPE = "modelscope"
|
||||
|
||||
# model
|
||||
CLAUDE3 = "claude-3-opus-20240229"
|
||||
@@ -74,14 +74,23 @@ GLM_4_AIRX = "glm-4-airx"
|
||||
|
||||
CLAUDE_3_OPUS = "claude-3-opus-latest"
|
||||
CLAUDE_3_OPUS_0229 = "claude-3-opus-20240229"
|
||||
|
||||
CLAUDE_35_SONNET = "claude-3-5-sonnet-latest" # 带 latest 标签的模型名称,会不断更新指向最新发布的模型
|
||||
CLAUDE_35_SONNET_1022 = "claude-3-5-sonnet-20241022" # 带具体日期的模型名称,会固定为该日期发布的模型
|
||||
CLAUDE_35_SONNET_0620 = "claude-3-5-sonnet-20240620"
|
||||
CLAUDE_3_SONNET = "claude-3-sonnet-20240229"
|
||||
|
||||
CLAUDE_3_HAIKU = "claude-3-haiku-20240307"
|
||||
|
||||
DEEPSEEK_CHAT = "deepseek-chat" # DeepSeek-V3对话模型
|
||||
DEEPSEEK_REASONER = "deepseek-reasoner" # DeepSeek-R1模型
|
||||
|
||||
GITEE_AI_MODEL_LIST = ["Yi-34B-Chat", "InternVL2-8B", "deepseek-coder-33B-instruct", "InternVL2.5-26B", "Qwen2-VL-72B", "Qwen2.5-32B-Instruct", "glm-4-9b-chat", "codegeex4-all-9b", "Qwen2.5-Coder-32B-Instruct", "Qwen2.5-72B-Instruct", "Qwen2.5-7B-Instruct", "Qwen2-72B-Instruct", "Qwen2-7B-Instruct", "code-raccoon-v1", "Qwen2.5-14B-Instruct"]
|
||||
|
||||
MODELSCOPE_MODEL_LIST = ["LLM-Research/c4ai-command-r-plus-08-2024","mistralai/Mistral-Small-Instruct-2409","mistralai/Ministral-8B-Instruct-2410","mistralai/Mistral-Large-Instruct-2407",
|
||||
"Qwen/Qwen2.5-Coder-32B-Instruct","Qwen/Qwen2.5-Coder-14B-Instruct","Qwen/Qwen2.5-Coder-7B-Instruct","Qwen/Qwen2.5-72B-Instruct","Qwen/Qwen2.5-32B-Instruct","Qwen/Qwen2.5-14B-Instruct","Qwen/Qwen2.5-7B-Instruct","Qwen/QwQ-32B-Preview",
|
||||
"LLM-Research/Llama-3.3-70B-Instruct","opencompass/CompassJudger-1-32B-Instruct","Qwen/QVQ-72B-Preview","LLM-Research/Meta-Llama-3.1-405B-Instruct","LLM-Research/Meta-Llama-3.1-8B-Instruct","Qwen/Qwen2-VL-7B-Instruct","LLM-Research/Meta-Llama-3.1-70B-Instruct",
|
||||
"Qwen/Qwen2.5-14B-Instruct-1M","Qwen/Qwen2.5-7B-Instruct-1M","Qwen/Qwen2.5-VL-3B-Instruct","Qwen/Qwen2.5-VL-7B-Instruct","Qwen/Qwen2.5-VL-72B-Instruct","deepseek-ai/DeepSeek-R1-Distill-Llama-70B","deepseek-ai/DeepSeek-R1-Distill-Llama-8B","deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
|
||||
"deepseek-ai/DeepSeek-R1-Distill-Qwen-14B","deepseek-ai/DeepSeek-R1-Distill-Qwen-7B","deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B","deepseek-ai/DeepSeek-R1","deepseek-ai/DeepSeek-V3","Qwen/QwQ-32B"]
|
||||
|
||||
MODEL_LIST = [
|
||||
GPT35, GPT35_0125, GPT35_1106, "gpt-3.5-turbo-16k",
|
||||
O1, O1_MINI, GPT_4o, GPT_4O_0806, GPT_4o_MINI, GPT4_TURBO, GPT4_TURBO_PREVIEW, GPT4_TURBO_01_25, GPT4_TURBO_11_06, GPT4, GPT4_32k, GPT4_06_13, GPT4_32k_06_13,
|
||||
@@ -93,9 +102,12 @@ MODEL_LIST = [
|
||||
CLAUDE_3_OPUS, CLAUDE_3_OPUS_0229, CLAUDE_35_SONNET, CLAUDE_35_SONNET_1022, CLAUDE_35_SONNET_0620, CLAUDE_3_SONNET, CLAUDE_3_HAIKU, "claude", "claude-3-haiku", "claude-3-sonnet", "claude-3-opus", "claude-3.5-sonnet",
|
||||
"moonshot-v1-8k", "moonshot-v1-32k", "moonshot-v1-128k",
|
||||
QWEN, QWEN_TURBO, QWEN_PLUS, QWEN_MAX,
|
||||
LINKAI_35, LINKAI_4_TURBO, LINKAI_4o
|
||||
LINKAI_35, LINKAI_4_TURBO, LINKAI_4o,
|
||||
DEEPSEEK_CHAT, DEEPSEEK_REASONER,
|
||||
MODELSCOPE
|
||||
]
|
||||
|
||||
MODEL_LIST = MODEL_LIST + GITEE_AI_MODEL_LIST + MODELSCOPE_MODEL_LIST
|
||||
# channel
|
||||
FEISHU = "feishu"
|
||||
DINGTALK = "dingtalk"
|
||||
|
||||
@@ -171,6 +171,9 @@ available_setting = {
|
||||
"zhipu_ai_api_base": "https://open.bigmodel.cn/api/paas/v4",
|
||||
"moonshot_api_key": "",
|
||||
"moonshot_base_url": "https://api.moonshot.cn/v1/chat/completions",
|
||||
#魔搭社区 平台配置
|
||||
"modelscope_api_key": "",
|
||||
"modelscope_base_url": "https://api-inference.modelscope.cn/v1/chat/completions",
|
||||
# LinkAI平台配置
|
||||
"use_linkai": False,
|
||||
"linkai_api_key": "",
|
||||
|
||||
@@ -339,7 +339,8 @@ class Godcmd(Plugin):
|
||||
ok, result = True, "配置已重载"
|
||||
elif cmd == "resetall":
|
||||
if bottype in [const.OPEN_AI, const.CHATGPT, const.CHATGPTONAZURE, const.LINKAI,
|
||||
const.BAIDU, const.XUNFEI, const.QWEN, const.GEMINI, const.ZHIPU_AI, const.MOONSHOT]:
|
||||
const.BAIDU, const.XUNFEI, const.QWEN, const.GEMINI, const.ZHIPU_AI, const.MOONSHOT,
|
||||
const.MODELSCOPE]:
|
||||
channel.cancel_all_session()
|
||||
bot.sessions.clear_all_session()
|
||||
ok, result = True, "重置所有会话成功"
|
||||
|
||||
@@ -99,7 +99,7 @@ class Role(Plugin):
|
||||
if e_context["context"].type != ContextType.TEXT:
|
||||
return
|
||||
btype = Bridge().get_bot_type("chat")
|
||||
if btype not in [const.OPEN_AI, const.CHATGPT, const.CHATGPTONAZURE, const.QWEN_DASHSCOPE, const.XUNFEI, const.BAIDU, const.ZHIPU_AI, const.MOONSHOT, const.MiniMax, const.LINKAI]:
|
||||
if btype not in [const.OPEN_AI, const.CHATGPT, const.CHATGPTONAZURE, const.QWEN_DASHSCOPE, const.XUNFEI, const.BAIDU, const.ZHIPU_AI, const.MOONSHOT, const.MiniMax, const.LINKAI,const.MODELSCOPE]:
|
||||
logger.debug(f'不支持的bot: {btype}')
|
||||
return
|
||||
bot = Bridge().get_bot("chat")
|
||||
|
||||
@@ -44,3 +44,6 @@ zhipuai>=2.0.1
|
||||
|
||||
# tongyi qwen new sdk
|
||||
dashscope
|
||||
|
||||
# tencentcloud sdk
|
||||
tencentcloud-sdk-python>=3.0.0
|
||||
|
||||
@@ -8,3 +8,4 @@ Pillow
|
||||
pre-commit
|
||||
web.py
|
||||
linkai>=0.0.6.0
|
||||
|
||||
|
||||
@@ -50,4 +50,8 @@ def create_voice(voice_type):
|
||||
from voice.xunfei.xunfei_voice import XunfeiVoice
|
||||
|
||||
return XunfeiVoice()
|
||||
elif voice_type == "tencent":
|
||||
from voice.tencent.tencent_voice import TencentVoice
|
||||
|
||||
return TencentVoice()
|
||||
raise RuntimeError
|
||||
|
||||
@@ -0,0 +1,5 @@
|
||||
{
|
||||
"voice_type": 1003,
|
||||
"secret_id": "YOUR_SECRET_ID",
|
||||
"secret_key": "YOUR_SECRET_KEY"
|
||||
}
|
||||
@@ -0,0 +1,119 @@
|
||||
import json
|
||||
import base64
|
||||
import os
|
||||
import time
|
||||
from voice.voice import Voice
|
||||
from common.log import logger
|
||||
from tencentcloud.common import credential
|
||||
from tencentcloud.asr.v20190614 import asr_client, models as asr_models
|
||||
from tencentcloud.tts.v20190823 import tts_client, models as tts_models
|
||||
from bridge.reply import Reply, ReplyType
|
||||
from common.tmp_dir import TmpDir
|
||||
|
||||
class TencentVoice(Voice):
    """Tencent Cloud speech adapter.

    Provides speech recognition (voiceToText) through the ASR
    SentenceRecognition API and speech synthesis (textToVoice) through the
    TTS TextToVoice API. Credentials and the TTS voice type are loaded from
    a ``config.json`` file located next to this module.
    """

    def __init__(self):
        super().__init__()
        self.secret_id = None    # Tencent Cloud SecretId, filled by _load_config
        self.secret_key = None   # Tencent Cloud SecretKey, filled by _load_config
        self.voice_type = 1003   # default TTS timbre (customer-service female voice)
        self._load_config()

    def _load_config(self):
        """Load credentials and options from the local config.json.

        Missing or unreadable configuration is logged instead of raised, so
        the channel keeps running and the failure surfaces on first use.
        """
        try:
            config_path = os.path.join(os.path.dirname(__file__), 'config.json')
            # Explicit UTF-8 so the JSON parses the same regardless of the
            # system locale (the file may contain non-ASCII values).
            with open(config_path, 'r', encoding='utf-8') as f:
                config = json.load(f)
            self.secret_id = config.get('secret_id')
            self.secret_key = config.get('secret_key')
            self.voice_type = config.get('voice_type', self.voice_type)
            if not self.secret_id or not self.secret_key:
                logger.error("[Tencent] Missing credentials in config.json")
        except Exception as e:
            logger.error(f"[Tencent] Failed to load config: {e}")

    def setup(self, config):
        """No-op; kept for backward compatibility with the Voice interface.

        Configuration is read from config.json in ``_load_config`` instead.
        """
        pass

    def voiceToText(self, voice_file):
        """Transcribe an audio file to text.

        :param voice_file: path of the audio file to recognize
        :return: Reply(TEXT, transcript) on success, Reply(ERROR, message) otherwise
        """
        try:
            # Build an authenticated ASR client.
            cred = credential.Credential(self.secret_id, self.secret_key)
            client = asr_client.AsrClient(cred, "ap-guangzhou")

            # SentenceRecognition takes the audio inline, base64-encoded.
            with open(voice_file, 'rb') as f:
                audio_data = f.read()
            base64_audio = base64.b64encode(audio_data).decode('utf-8')

            req = asr_models.SentenceRecognitionRequest()
            req.ProjectId = 0
            req.SubServiceType = 2
            req.EngSerViceType = "16k_zh"
            req.SourceType = 1
            # Derive the container format from the file extension instead of
            # hard-coding "wav", so mp3/m4a/... inputs are accepted too;
            # fall back to wav when the path has no extension.
            req.VoiceFormat = os.path.splitext(voice_file)[1].lstrip(".").lower() or "wav"
            req.UsrAudioKey = "voice_recognition"
            req.Data = base64_audio

            resp = client.SentenceRecognition(req)

            if resp.Result:
                logger.info("[Tencent] Voice to text success: {}".format(resp.Result))
                return Reply(ReplyType.TEXT, resp.Result)
            else:
                logger.warning("[Tencent] Voice to text failed")
                return Reply(ReplyType.ERROR, "腾讯语音识别失败")

        except Exception as e:
            logger.error("[Tencent] Voice to text error: {}".format(e))
            return Reply(ReplyType.ERROR, "腾讯语音识别出错:{}".format(str(e)))

    def textToVoice(self, text):
        """Synthesize speech for *text* and save it as an mp3 in the tmp dir.

        :param text: text to synthesize
        :return: Reply(VOICE, file_path) on success, Reply(ERROR, message) otherwise
        """
        try:
            cred = credential.Credential(self.secret_id, self.secret_key)
            client = tts_client.TtsClient(cred, "ap-guangzhou")

            req = tts_models.TextToVoiceRequest()
            req.Text = text
            req.SessionId = str(int(time.time()))
            req.Volume = 5
            req.Speed = 0
            req.ProjectId = 0
            req.ModelType = 1
            req.PrimaryLanguage = 1
            req.SampleRate = 16000
            req.VoiceType = self.voice_type  # configurable timbre; default 1003 (customer-service female voice)

            response = client.TextToVoice(req)

            if response.Audio:
                # hash(text) masked to a non-negative int keeps names unique
                # enough within a second without leaking the text itself.
                fileName = TmpDir().path() + "reply-" + str(int(time.time())) + "-" + str(hash(text) & 0x7FFFFFFF) + ".mp3"
                with open(fileName, "wb") as f:
                    f.write(base64.b64decode(response.Audio))
                logger.info("[Tencent] textToVoice text={} voice file name={}".format(text, fileName))
                return Reply(ReplyType.VOICE, fileName)
            else:
                logger.error("[Tencent] textToVoice failed")
                return Reply(ReplyType.ERROR, "腾讯语音合成失败")

        except Exception as e:
            logger.error("[Tencent] Text to voice error: {}".format(e))
            return Reply(ReplyType.ERROR, "腾讯语音合成出错:{}".format(str(e)))
|
||||
Reference in New Issue
Block a user