Compare commits

..

69 Commits

Author SHA1 Message Date
vision 7167310ccd Merge pull request #2571 from 6vision/master
update readme and adjust some dependency packages.
2025-04-11 16:04:55 +08:00
6vision 263667a2d4 update 2025-04-11 16:03:22 +08:00
6vision d5cef291f6 update readme and adjust some dependency packages. 2025-04-11 15:50:28 +08:00
vision c8d166e833 Merge pull request #2544 from wahahage/master
新增腾讯语音
2025-04-11 14:14:55 +08:00
vision 6e25782d8b docs: Delete channel/wechat/README.md 2025-04-11 10:23:05 +08:00
vision c3127f7e84 Merge pull request #2562 from josephier/support_wcferry
feat: add support for WeChat integration via the wcferry protocol
2025-04-09 18:51:01 +08:00
josephier e8bc173cd7 doc: Update and rename readme.md to README.md 2025-03-31 19:39:01 +08:00
josephier 4d1cdf5207 doc:update git url 2025-03-30 16:20:04 +08:00
josephier 57a473364e Merge branch 'zhayujie:master' into master 2025-03-30 15:14:45 +08:00
vision 40b62e9d38 Add support for ModelScope API-Inference
Add support for ModelScope API-Inference
2025-03-30 15:12:29 +08:00
gaojia ead5f9926b 删除funasr 2025-03-27 10:13:38 +08:00
gaojia 814b6753c2 删除配置文件中的注释 2025-03-26 17:33:39 +08:00
gaojia ce505251f8 修改配置文件及文件夹名称 2025-03-26 10:01:41 +08:00
yrk 5d2a987aaa Update README.md 2025-03-25 10:38:32 +08:00
yanrk123 4d67e08723 Fix the issue with Chinese description in drawing. 2025-03-18 14:11:22 +08:00
yanrk123 2e71dd5fe2 Fix bug in modelscope_bot.py 2025-03-18 09:47:39 +08:00
yanrk123 c3b9643227 Modify ms_bot.py 2025-03-17 15:46:50 +08:00
josephier 0aad5dc2b7 Update wcferry version
Update wcferry version
2025-03-16 19:16:59 +08:00
yanrk123 cec900168f Modify model list 2025-03-14 13:56:00 +08:00
josephier f9b1c403d5 docs: Update readme.md 2025-03-12 20:33:35 +08:00
yrk111222 9024b602f5 Update modelscope_bot.py 2025-03-12 16:15:40 +08:00
yanrk123 c139fd9a57 support stream mode for QwQ-32B 2025-03-12 15:45:52 +08:00
yrk111222 e299b68163 Update const.py 2025-03-11 16:48:37 +08:00
yanrk123 7777a53a82 Add supported model list 2025-03-11 16:34:43 +08:00
yanrk123 3e185dbbfe Add support for ModelScope API 2025-03-11 11:12:57 +08:00
josephier e8a32af369 docs: add README for wx channel based on wcferry
docs: add README for wx channel based on wcferry
2025-03-10 20:36:41 +08:00
josephier 7b0ec6687e docs:add README for WechatFerry channel 2025-03-10 20:29:37 +08:00
gaojia ec1c6c7b92 新增腾讯语音 2025-03-04 09:56:26 +08:00
josephier 8dfaa86760 chore: remove incomplete features for wchatferry 2025-02-14 00:41:31 +08:00
josephier 323aebd1be feat: add support for WeChat integration via the wchatferry 2025-02-14 00:25:09 +08:00
Saboteur7 436c038a2f fix: temporarily remove unavailable channels 2025-02-05 12:25:30 +08:00
vision ccd50ec6c0 Merge pull request #2485 from 6vision/master
feat: Add support for deepseek-chat and deepseek-reasoner models
2025-02-04 10:29:24 +08:00
6vision a7541c2c0f feat: Support #model directive to set model to deepseek-chat and deepseek-reasoner 2025-02-03 21:23:05 +08:00
Saboteur7 c3a57d756c fix: remove channel restrictions 2025-01-31 00:27:20 +08:00
Saboteur7 aa300a4c98 fix: temporarily close the wx channel to prevent account ban 2025-01-17 17:24:42 +08:00
vision 83ea7352b9 Merge pull request #2430 from PJ-568/master
fix: domain type of xunfei lite
2025-01-15 20:03:43 +08:00
Saboteur7 9050712cd8 Update README.md 2024-12-28 16:28:35 +08:00
Saboteur7 8d92fdbb6e Update README.md 2024-12-28 16:27:31 +08:00
zhayujie a2442ec1b9 Merge pull request #2435 from 6vision/master
fix: resolve display issue for replies containing only image URLs
2024-12-27 00:02:55 +08:00
vision 71662c9cd9 Merge branch 'zhayujie:master' into master 2024-12-26 23:17:21 +08:00
vision 54ff5dbcc2 fix: resolve display issue for replies containing only URLs 2024-12-26 23:16:05 +08:00
zhayujie 4ab7bd3b51 Merge pull request #2431 from 6vision/support-GiteeAI
feat: add gitee-ai models that are compatible with openai format
2024-12-24 20:42:17 +08:00
vision ef3c61a297 update readme 2024-12-24 19:57:26 +08:00
vision abf79bf60c add gitee-ai model resources that are compatible with openai format 2024-12-21 17:24:32 +08:00
PJ568 5d3cecd926 fix: domain type of xunfei lite
Reference: [Web API 接口说明](https://www.xfyun.cn/doc/spark/Web.html#_1-%E6%8E%A5%E5%8F%A3%E8%AF%B4%E6%98%8E)的 `parameter.chat部分`。
2024-12-20 14:46:25 +08:00
Saboteur7 16324e7283 Merge pull request #2407 from ayasa520/fix_reloadp
fix(plugin): fix reloadp command not taking effect
2024-12-13 15:39:33 +08:00
Saboteur7 9f7e2e1572 Merge pull request #2413 from ayasa520/fix-scanp
fix: Memory leak caused by scanp command due to handler's reference of plugin instance
2024-12-13 14:57:22 +08:00
vision 857ce1d530 Merge pull request #2398 from stonyz/web-channel
增加web channel
2024-12-13 11:45:01 +08:00
vision be0d72775d Merge pull request #2423 from 6vision/reedme_update_docker_deploy
update readme
2024-12-13 11:41:17 +08:00
vision 7832a2495b Merge pull request #2422 from printlndarling/master
add: add gemini-2.0-flash-exp model
2024-12-13 11:35:26 +08:00
6vision 0506b7f735 update readme 2024-12-13 11:25:36 +08:00
繁星_逐梦 4c0b7942f0 add: gemini-2.0-flash-exp model 2024-12-12 22:22:14 +08:00
繁星_逐梦 651c840c4a add: gemini-2.0-flash-exp model 2024-12-12 22:19:13 +08:00
rikka 2a351ca415 fix(reloadp): clear handlers when reloading plugin to avoid memory leaks 2024-12-05 00:33:00 +08:00
rikka 49b7106d71 fix: Memory leak caused by scanp command due to handler's reference to plugin instance.
close #2412
2024-12-03 22:39:56 +08:00
zhayujie 8bf633f539 Merge pull request #2408 from 6vision/fix-summary-image
图像识别逻辑优化
2024-12-02 21:53:52 +08:00
6vision 0f8efcb4b0 图像识别逻辑优化 2024-12-02 21:16:59 +08:00
Rikka c567641c5c fix(plugin): fix reloadp command not taking effect
- Use write_plugin_config() instead of directly modifying plugin_config dict
- Add remove_plugin_config() to clear plugin config before reload
- Update plugins to use pconf() and write_plugin_config() for better config management
2024-12-02 16:38:21 +08:00
vision bdc3820382 Merge pull request #2405 from 6vision/role-plugin-linkai
Linkai bot is compatible with the role plugin.
2024-12-02 12:16:30 +08:00
6vision 33a69a7907 Linkai bot is compatible with the role plugin. 2024-12-02 12:13:26 +08:00
vision a4d0e9bbc3 Merge pull request #2401 from 6vision/plugins_source_update
插件列表更新
2024-11-29 11:09:27 +08:00
6vision afc753e1d2 插件列表更新 2024-11-29 11:07:16 +08:00
zhayujie e641a41224 Update README.md 2024-11-28 21:48:42 +08:00
vision 79305c0632 Merge pull request #2400 from 6vision/readme_update
readme update
2024-11-28 12:59:00 +08:00
6vision ef2ce3f09d 说明文档更新 2024-11-28 12:41:00 +08:00
Stony 71c18c04fc 增加web channel 2024-11-27 08:53:13 +08:00
Saboteur7 cf84e57f81 fix: add exception handling 2024-11-15 11:58:10 +08:00
vision 9421d44579 Merge pull request #2373 from 6vision/summary_app_code
Buy using app code, supports custom summary prompt .
2024-11-07 20:16:53 +08:00
6vision 5cd2ae8cc8 Summary supports app_code 2024-11-06 21:45:03 +08:00
33 changed files with 1217 additions and 67 deletions
+1
View File
@@ -14,6 +14,7 @@ tmp
plugins.json
itchat.pkl
*.log
logs/
user_datas.pkl
chatgpt_tool_hub/
plugins/**/
+18 -5
View File
@@ -1,11 +1,19 @@
# 简介
<p align="center"><img src= "https://github.com/user-attachments/assets/31fb4eab-3be4-477d-aa76-82cf62bfd12c" alt="Chatgpt-on-Wechat" width="600" /></p>
> chatgpt-on-wechat(简称CoW)项目是基于大模型的智能对话机器人,支持微信公众号、企业微信应用、飞书、钉钉接入,可选择GPT3.5/GPT4.0/Claude/Gemini/LinkAI/ChatGLM/KIMI/文心一言/讯飞星火/通义千问/LinkAI,能处理文本、语音和图片,通过插件访问操作系统和互联网等外部资源,支持基于自有知识库定制企业AI应用。
<p align="center">
<a href="https://github.com/zhayujie/chatgpt-on-wechat/releases/latest"><img src="https://img.shields.io/github/v/release/zhayujie/chatgpt-on-wechat" alt="Latest release"></a>
<a href="https://github.com/zhayujie/chatgpt-on-wechat/blob/master/LICENSE"><img src="https://img.shields.io/github/license/zhayujie/chatgpt-on-wechat" alt="License: MIT"></a>
<a href="https://github.com/zhayujie/chatgpt-on-wechat"><img src="https://img.shields.io/github/stars/zhayujie/chatgpt-on-wechat?style=flat-square" alt="Stars"></a> <br/>
</p>
chatgpt-on-wechat(简称CoW)项目是基于大模型的智能对话机器人,支持微信公众号、企业微信应用、飞书、钉钉接入,可选择GPT3.5/GPT4.0/Claude/Gemini/LinkAI/ChatGLM/KIMI/文心一言/讯飞星火/通义千问/LinkAI/ModelScope,能处理文本、语音和图片,通过插件访问操作系统和互联网等外部资源,支持基于自有知识库定制企业AI应用。
# 简介
最新版本支持的功能如下:
-**多端部署:** 有多种部署方式可选择且功能完备,目前已支持微信公众号、企业微信应用、飞书、钉钉等部署方式
-**基础对话:** 私聊及群聊的消息智能回复,支持多轮会话上下文记忆,支持 GPT-3.5, GPT-4o-mini, GPT-4o, GPT-4, Claude-3.5, Gemini, 文心一言, 讯飞星火, 通义千问,ChatGLM-4Kimi(月之暗面), MiniMax
-**基础对话:** 私聊及群聊的消息智能回复,支持多轮会话上下文记忆,支持 GPT-3.5, GPT-4o-mini, GPT-4o, GPT-4, Claude-3.5, Gemini, 文心一言, 讯飞星火, 通义千问, ChatGLM-4, Kimi(月之暗面), MiniMax, GiteeAI, ModelScope(魔搭社区)
-**语音能力:** 可识别语音消息,通过文字或语音回复,支持 azure, baidu, google, openai(whisper/tts) 等多种语音模型
-**图像能力:** 支持图片生成、图片识别、图生图(如照片修复),可选择 Dall-E-3, stable diffusion, replicate, midjourney, CogView-3, vision模型
-**丰富插件:** 支持个性化插件扩展,已实现多角色切换、文字冒险、敏感词过滤、聊天记录总结、文档总结和对话、联网搜索等插件
@@ -40,11 +48,15 @@ DEMO视频:https://cdn.link-ai.tech/doc/cow_demo.mp4
**企业服务和产品咨询** 可联系产品顾问:
<img width="160" src="https://img-1317903499.cos.ap-guangzhou.myqcloud.com/docs/github-product-consult.png">
<img width="160" src="https://cdn.link-ai.tech/consultant-s.jpg">
<br>
# 🏷 更新日志
>**2025.04.11** [1.7.5版本](https://github.com/zhayujie/chatgpt-on-wechat/releases/tag/1.7.5) 新增支持 [wechatferry](https://github.com/zhayujie/chatgpt-on-wechat/pull/2562) 协议、新增 deepseek 模型、新增支持腾讯云语音能力、新增支持 ModelScope 和 Gitee-AI API接口
>**2024.12.13** [1.7.4版本](https://github.com/zhayujie/chatgpt-on-wechat/releases/tag/1.7.4) 新增 Gemini 2.0 模型、新增web channel、解决内存泄漏问题、解决 `#reloadp` 命令重载不生效问题
>**2024.10.31** [1.7.3版本](https://github.com/zhayujie/chatgpt-on-wechat/releases/tag/1.7.3) 程序稳定性提升、数据库功能、Claude模型优化、linkai插件优化、离线通知
>**2024.09.26** [1.7.2版本](https://github.com/zhayujie/chatgpt-on-wechat/releases/tag/1.7.2) 和 [1.7.1版本](https://github.com/zhayujie/chatgpt-on-wechat/releases/tag/1.7.1) 文心,讯飞等模型优化、o1 模型、快速安装和管理脚本
@@ -144,6 +156,7 @@ pip3 install -r requirements-optional.txt
{
"model": "gpt-3.5-turbo", # 模型名称, 支持 gpt-3.5-turbo, gpt-4, gpt-4-turbo, wenxin, xunfei, glm-4, claude-3-haiku, moonshot
"open_ai_api_key": "YOUR API KEY", # 如果使用openAI模型则填入上面创建的 OpenAI API KEY
"open_ai_api_base": "https://api.openai.com/v1", # OpenAI接口代理地址
"proxy": "", # 代理客户端的ip和端口,国内环境开启代理的需要填写该项,如 "127.0.0.1:7890"
"single_chat_prefix": ["bot", "@bot"], # 私聊时文本需要包含该前缀才能触发机器人回复
"single_chat_reply_prefix": "[bot] ", # 私聊时自动回复的前缀,用于区分真人
@@ -275,7 +288,7 @@ sudo docker logs -f chatgpt-on-wechat
volumes:
- ./config.json:/app/plugins/config.json
```
**注**:采用docker方式部署的详细教程可以参考:[docker部署CoW项目](https://www.wangpc.cc/ai/docker-deploy-cow/)
### 4. Railway部署
> Railway 每月提供5刀和最多500小时的免费额度。 (07.11更新: 目前大部分账号已无法免费部署)
+1 -1
View File
@@ -27,7 +27,7 @@ def sigterm_handler_wrap(_signo):
def start_channel(channel_name: str):
channel = channel_factory.create_channel(channel_name)
if channel_name in ["wx", "wxy", "terminal", "wechatmp", "wechatmp_service", "wechatcom_app", "wework",
if channel_name in ["wx", "wxy", "terminal", "wechatmp","web", "wechatmp_service", "wechatcom_app", "wework",
const.FEISHU, const.DINGTALK]:
PluginManager().load_plugins()
+4
View File
@@ -68,5 +68,9 @@ def create_bot(bot_type):
from bot.minimax.minimax_bot import MinimaxBot
return MinimaxBot()
elif bot_type == const.MODELSCOPE:
from bot.modelscope.modelscope_bot import ModelScopeBot
return ModelScopeBot()
raise RuntimeError
+1 -1
View File
@@ -83,7 +83,7 @@ def num_tokens_from_messages(messages, model):
tokens_per_message = 3
tokens_per_name = 1
else:
logger.warn(f"num_tokens_from_messages() is not implemented for model {model}. Returning num tokens assuming gpt-3.5-turbo.")
logger.debug(f"num_tokens_from_messages() is not implemented for model {model}. Returning num tokens assuming gpt-3.5-turbo.")
return num_tokens_from_messages(messages, model="gpt-3.5-turbo")
num_tokens = 0
for message in messages:
+3 -3
View File
@@ -147,9 +147,9 @@ class LinkAIBot(Bot):
if response["choices"][0].get("img_urls"):
thread = threading.Thread(target=self._send_image, args=(context.get("channel"), context, response["choices"][0].get("img_urls")))
thread.start()
if response["choices"][0].get("text_content"):
reply_content = response["choices"][0].get("text_content")
reply_content = self._process_url(reply_content)
reply_content = response["choices"][0].get("text_content")
if reply_content:
reply_content = self._process_url(reply_content)
return Reply(ReplyType.TEXT, reply_content)
else:
+277
View File
@@ -0,0 +1,277 @@
# encoding:utf-8
import time
import json
import openai
import openai.error
from bot.bot import Bot
from bot.session_manager import SessionManager
from bridge.context import ContextType
from bridge.reply import Reply, ReplyType
from common.log import logger
from config import conf, load_config
from .modelscope_session import ModelScopeSession
import requests
# ModelScope对话模型API
# ModelScope chat model API
class ModelScopeBot(Bot):
    """Bot backed by the ModelScope API-Inference service (OpenAI-compatible HTTP API).

    Handles text chat (with optional streaming for QwQ-32B) and text-to-image
    generation. Errors are reported to the user via canned Chinese messages.
    """

    def __init__(self):
        super().__init__()
        # Session store; falls back to Qwen/Qwen2.5-7B-Instruct when no model is configured.
        self.sessions = SessionManager(ModelScopeSession, model=conf().get("model") or "Qwen/Qwen2.5-7B-Instruct")
        model = conf().get("model") or "Qwen/Qwen2.5-7B-Instruct"
        if model == "modelscope":
            # "modelscope" is the generic bot-type value, not a real model id; use the default model.
            model = "Qwen/Qwen2.5-7B-Instruct"
        self.args = {
            "model": model,  # name of the chat model
            "temperature": conf().get("temperature", 0.3),  # if set, must be in [0, 1]; 0.3 is recommended
            "top_p": conf().get("top_p", 1.0),  # use the default value
        }
        self.api_key = conf().get("modelscope_api_key")
        self.base_url = conf().get("modelscope_base_url", "https://api-inference.modelscope.cn/v1/chat/completions")
        """
        For the list of ModelScope models that support API-Inference, see the model
        center on the ModelScope site: https://modelscope.cn/models?filter=inference_type&page=1,
        or run `curl https://api-inference.modelscope.cn/v1/models`; common/const.py
        also contains a model list. For how to obtain a free ModelScope API key, see
        https://modelscope.cn/docs/model-service/API-Inference/intro.
        """

    def reply(self, query, context=None):
        """Dispatch a context to chat / image generation and return a Reply."""
        # acquire reply content
        if context.type == ContextType.TEXT:
            logger.info("[MODELSCOPE_AI] query={}".format(query))
            session_id = context["session_id"]
            reply = None
            # Built-in session-management commands are handled before calling the API.
            clear_memory_commands = conf().get("clear_memory_commands", ["#清除记忆"])
            if query in clear_memory_commands:
                self.sessions.clear_session(session_id)
                reply = Reply(ReplyType.INFO, "记忆已清除")
            elif query == "#清除所有":
                self.sessions.clear_all_session()
                reply = Reply(ReplyType.INFO, "所有人记忆已清除")
            elif query == "#更新配置":
                load_config()
                reply = Reply(ReplyType.INFO, "配置已更新")
            if reply:
                return reply
            session = self.sessions.session_query(query, session_id)
            logger.debug("[MODELSCOPE_AI] session query={}".format(session.messages))
            # A per-context model override may be supplied by plugins/callers.
            model = context.get("modelscope_model")
            new_args = self.args.copy()
            if model:
                new_args["model"] = model
            # QwQ-32B is routed through the streaming endpoint; other models use the
            # plain completion call. NOTE(review): the reason QwQ-32B needs streaming
            # is not visible here — presumably an API requirement; confirm upstream.
            if new_args["model"] == "Qwen/QwQ-32B":
                reply_content = self.reply_text_stream(session, args=new_args)
            else:
                reply_content = self.reply_text(session, args=new_args)
            logger.debug(
                "[MODELSCOPE_AI] new_query={}, session_id={}, reply_cont={}, completion_tokens={}".format(
                    session.messages,
                    session_id,
                    reply_content["content"],
                    reply_content["completion_tokens"],
                )
            )
            if reply_content["completion_tokens"] == 0 and len(reply_content["content"]) > 0:
                # completion_tokens == 0 with non-empty content means reply_text returned a
                # canned error message; it is still delivered to the user as plain text.
                # NOTE(review): the inner `len(...) == 0` branch is unreachable — the outer
                # condition already requires len > 0 — so ReplyType.ERROR can never be
                # produced here; this contradicts the original comment. Confirm intent.
                if len(reply_content["content"]) == 0:
                    reply = Reply(ReplyType.ERROR, reply_content["content"])
                else:
                    reply = Reply(ReplyType.TEXT, reply_content["content"])
            elif reply_content["completion_tokens"] > 0:
                # Successful completion: record the reply into the session history.
                self.sessions.session_reply(reply_content["content"], session_id, reply_content["total_tokens"])
                reply = Reply(ReplyType.TEXT, reply_content["content"])
            else:
                # completion_tokens == 0 and empty content: treat as an error.
                reply = Reply(ReplyType.ERROR, reply_content["content"])
                logger.debug("[MODELSCOPE_AI] reply {} used 0 tokens.".format(reply_content))
            return reply
        elif context.type == ContextType.IMAGE_CREATE:
            ok, retstring = self.create_img(query, 0)
            reply = None
            if ok:
                reply = Reply(ReplyType.IMAGE_URL, retstring)
            else:
                reply = Reply(ReplyType.ERROR, retstring)
            return reply
        else:
            reply = Reply(ReplyType.ERROR, "Bot不支持处理{}类型的消息".format(context.type))
            return reply

    def reply_text(self, session: ModelScopeSession, args=None, retry_count=0) -> dict:
        """
        Call the ModelScope chat-completion HTTP API to get the answer.
        :param session: a conversation session
        :param args: request body (model/temperature/top_p); assumed non-None — TODO confirm callers
        :param retry_count: retry count (recursion depth; at most 2 retries)
        :return: dict with total_tokens/completion_tokens/content on success, or
                 completion_tokens == 0 plus a canned error message on failure
        """
        try:
            headers = {
                "Content-Type": "application/json",
                "Authorization": "Bearer " + self.api_key
            }
            body = args
            # NOTE(review): this mutates the caller-supplied args dict in place.
            body["messages"] = session.messages
            res = requests.post(
                self.base_url,
                headers=headers,
                data=json.dumps(body)
            )
            if res.status_code == 200:
                response = res.json()
                return {
                    "total_tokens": response["usage"]["total_tokens"],
                    "completion_tokens": response["usage"]["completion_tokens"],
                    "content": response["choices"][0]["message"]["content"]
                }
            else:
                response = res.json()
                if "errors" in response:
                    error = response.get("errors")
                elif "error" in response:
                    error = response.get("error")
                else:
                    error = "Unknown error"
                # NOTE(review): when error is the fallback string, error.get(...) below
                # raises AttributeError; that lands in the generic except branch and is
                # retried — presumably unintended, confirm.
                logger.error(f"[MODELSCOPE_AI] chat failed, status_code={res.status_code}, "
                             f"msg={error.get('message')}, type={error.get('type')}")
                result = {"completion_tokens": 0, "content": "提问太快啦,请休息一下再问我吧"}
                need_retry = False
                if res.status_code >= 500:
                    # server error, need retry
                    logger.warn(f"[MODELSCOPE_AI] do retry, times={retry_count}")
                    need_retry = retry_count < 2
                elif res.status_code == 401:
                    result["content"] = "授权失败,请检查API Key是否正确"
                elif res.status_code == 429:
                    result["content"] = "请求过于频繁,请稍后再试"
                    need_retry = retry_count < 2
                else:
                    need_retry = False
                if need_retry:
                    time.sleep(3)
                    return self.reply_text(session, args, retry_count + 1)
                else:
                    return result
        except Exception as e:
            logger.exception(e)
            need_retry = retry_count < 2
            result = {"completion_tokens": 0, "content": "我现在有点累了,等会再来吧"}
            if need_retry:
                return self.reply_text(session, args, retry_count + 1)
            else:
                return result

    def reply_text_stream(self, session: ModelScopeSession, args=None, retry_count=0) -> dict:
        """
        Call ModelScope's ChatCompletion to get the answer with a streaming response.
        :param session: a conversation session
        :param args: request body; mutated in place (messages/stream keys are added)
        :param retry_count: retry count (recursion depth; at most 2 retries)
        :return: dict shaped like reply_text()'s result; token counts are dummies
        """
        try:
            headers = {
                "Content-Type": "application/json",
                "Authorization": "Bearer " + self.api_key
            }
            body = args
            body["messages"] = session.messages
            body["stream"] = True  # enable streaming response
            res = requests.post(
                self.base_url,
                headers=headers,
                data=json.dumps(body),
                stream=True
            )
            if res.status_code == 200:
                # Accumulate SSE "data: {...}" delta chunks into one reply string.
                content = ""
                for line in res.iter_lines():
                    if line:
                        decoded_line = line.decode('utf-8')
                        if decoded_line.startswith("data: "):
                            try:
                                json_data = json.loads(decoded_line[6:])
                                delta_content = json_data.get("choices", [{}])[0].get("delta", {}).get("content", "")
                                if delta_content:
                                    content += delta_content
                            except json.JSONDecodeError as e:
                                # non-JSON frames (e.g. "[DONE]") are skipped silently
                                pass
                return {
                    "total_tokens": 1,  # streaming responses usually do not report token usage
                    "completion_tokens": 1,  # dummy non-zero value so reply() treats this as success
                    "content": content
                }
            else:
                response = res.json()
                if "errors" in response:
                    error = response.get("errors")
                elif "error" in response:
                    error = response.get("error")
                else:
                    error = "Unknown error"
                logger.error(f"[MODELSCOPE_AI] chat failed, status_code={res.status_code}, "
                             f"msg={error.get('message')}, type={error.get('type')}")
                result = {"completion_tokens": 0, "content": "提问太快啦,请休息一下再问我吧"}
                need_retry = False
                if res.status_code >= 500:
                    # server error, need retry
                    logger.warn(f"[MODELSCOPE_AI] do retry, times={retry_count}")
                    need_retry = retry_count < 2
                elif res.status_code == 401:
                    result["content"] = "授权失败,请检查API Key是否正确"
                elif res.status_code == 429:
                    result["content"] = "请求过于频繁,请稍后再试"
                    need_retry = retry_count < 2
                else:
                    need_retry = False
                if need_retry:
                    time.sleep(3)
                    return self.reply_text_stream(session, args, retry_count + 1)
                else:
                    return result
        except Exception as e:
            logger.exception(e)
            need_retry = retry_count < 2
            result = {"completion_tokens": 0, "content": "我现在有点累了,等会再来吧"}
            if need_retry:
                return self.reply_text_stream(session, args, retry_count + 1)
            else:
                return result

    def create_img(self, query, retry_count=0):
        """Generate an image for *query* via the ModelScope image endpoint.

        :param query: text prompt
        :param retry_count: accepted for signature symmetry; unused in this body
        :return: (True, image_url) on success, (False, error_message) on failure
        """
        try:
            logger.info("[ModelScopeImage] image_query={}".format(query))
            headers = {
                "Content-Type": "application/json; charset=utf-8",  # explicitly specify the encoding
                "Authorization": f"Bearer {self.api_key}"
            }
            payload = {
                "prompt": query,  # required
                "n": 1,
                "model": conf().get("text_to_image"),
            }
            url = "https://api-inference.modelscope.cn/v1/images/generations"
            # Serialize manually, keeping Chinese characters (disable ASCII escaping).
            json_payload = json.dumps(payload, ensure_ascii=False).encode('utf-8')
            # Send the raw bytes via the data parameter (requests handles the encoding).
            res = requests.post(url, headers=headers, data=json_payload)
            response_data = res.json()
            image_url = response_data['images'][0]['url']
            logger.info("[ModelScopeImage] image_url={}".format(image_url))
            return True, image_url
        except Exception as e:
            logger.error(format(e))
            return False, "画图出现问题,请休息一下再问我吧"
+51
View File
@@ -0,0 +1,51 @@
from bot.session_manager import Session
from common.log import logger
class ModelScopeSession(Session):
    """Conversation session for ModelScope models.

    Token counting is approximated by character length (see the module-level
    num_tokens_from_messages helper).
    """

    def __init__(self, session_id, system_prompt=None, model="Qwen/Qwen2.5-7B-Instruct"):
        super().__init__(session_id, system_prompt)
        self.model = model
        self.reset()

    def discard_exceeding(self, max_tokens, cur_tokens=None):
        """Drop oldest non-system messages until the session fits in max_tokens.

        :param max_tokens: upper bound for the session's token count
        :param cur_tokens: fallback count used when precise counting raises
        :return: the (possibly approximated) token count after trimming
        """
        precise = True
        try:
            cur_tokens = self.calc_tokens()
        except Exception as e:
            precise = False
            if cur_tokens is None:
                raise e
            logger.debug("Exception when counting tokens precisely for query: {}".format(e))
        while cur_tokens > max_tokens:
            if len(self.messages) > 2:
                # keep messages[0] (the system prompt); drop the oldest turn
                self.messages.pop(1)
            elif len(self.messages) == 2 and self.messages[1]["role"] == "assistant":
                self.messages.pop(1)
                if precise:
                    cur_tokens = self.calc_tokens()
                else:
                    # imprecise fallback: coarse decrement only
                    cur_tokens = cur_tokens - max_tokens
                break
            elif len(self.messages) == 2 and self.messages[1]["role"] == "user":
                # a single oversized user message cannot be trimmed further
                logger.warn("user message exceed max_tokens. total_tokens={}".format(cur_tokens))
                break
            else:
                logger.debug("max_tokens={}, total_tokens={}, len(messages)={}".format(max_tokens, cur_tokens,
                                                                                       len(self.messages)))
                break
            # re-count after removing a message, then loop again if still too large
            if precise:
                cur_tokens = self.calc_tokens()
            else:
                cur_tokens = cur_tokens - max_tokens
        return cur_tokens

    def calc_tokens(self):
        # delegates to the module-level character-count approximation
        return num_tokens_from_messages(self.messages, self.model)
def num_tokens_from_messages(messages, model):
    """Approximate the token count of *messages* by total content length.

    :param messages: iterable of dicts each carrying a "content" string
    :param model: model name (unused; kept for interface parity with other bots)
    :return: sum of the character lengths of all message contents
    """
    return sum(len(message["content"]) for message in messages)
+1 -1
View File
@@ -41,7 +41,7 @@ class XunFeiBot(Bot):
self.api_key = conf().get("xunfei_api_key")
self.api_secret = conf().get("xunfei_api_secret")
# 默认使用v2.0版本: "generalv2"
# Spark Lite请求地址(spark_url): wss://spark-api.xf-yun.com/v1.1/chat, 对应的domain参数为: "general"
# Spark Lite请求地址(spark_url): wss://spark-api.xf-yun.com/v1.1/chat, 对应的domain参数为: "lite"
# Spark V2.0请求地址(spark_url): wss://spark-api.xf-yun.com/v2.1/chat, 对应的domain参数为: "generalv2"
# Spark Pro 请求地址(spark_url): wss://spark-api.xf-yun.com/v3.1/chat, 对应的domain参数为: "generalv3"
# Spark Pro-128K请求地址(spark_url): wss://spark-api.xf-yun.com/chat/pro-128k, 对应的domain参数为: "pro-128k"
+3
View File
@@ -49,6 +49,9 @@ class Bridge(object):
if model_type in [const.MOONSHOT, "moonshot-v1-8k", "moonshot-v1-32k", "moonshot-v1-128k"]:
self.btype["chat"] = const.MOONSHOT
if model_type in [const.MODELSCOPE]:
self.btype["chat"] = const.MODELSCOPE
if model_type in ["abab6.5-chat"]:
self.btype["chat"] = const.MiniMax
+6
View File
@@ -18,9 +18,15 @@ def create_channel(channel_type) -> Channel:
elif channel_type == "wxy":
from channel.wechat.wechaty_channel import WechatyChannel
ch = WechatyChannel()
elif channel_type == "wcf":
from channel.wechat.wcf_channel import WechatfChannel
ch = WechatfChannel()
elif channel_type == "terminal":
from channel.terminal.terminal_channel import TerminalChannel
ch = TerminalChannel()
elif channel_type == 'web':
from channel.web.web_channel import WebChannel
ch = WebChannel()
elif channel_type == "wechatmp":
from channel.wechatmp.wechatmp_channel import WechatMPChannel
ch = WechatMPChannel(passive_reply=True)
+7
View File
@@ -0,0 +1,7 @@
# Web channel
使用SSE(Server-Sent Events,服务器推送事件)实现,提供了一个默认的网页。也可以自己实现加入api
# 使用方法
- 在配置文件中channel_type填入web即可
- 访问地址 http://localhost:9899/chat
- port可以在配置项 web_port中设置
+165
View File
@@ -0,0 +1,165 @@
<!DOCTYPE html>
<html lang="zh">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Chat</title>
<style>
body {
font-family: Arial, sans-serif;
display: flex;
flex-direction: column;
height: 100vh; /* 占据所有高度 */
margin: 0;
/* background-color: #f8f9fa; */
}
#chat-container {
display: flex;
flex-direction: column;
width: 100%;
max-width: 500px;
margin: auto;
border: 1px solid #ccc;
border-radius: 5px;
overflow: hidden;
box-shadow: 0 0 10px rgba(0, 0, 0, 0.1);
flex: 1; /* 使聊天容器占据剩余空间 */
}
#messages {
flex-direction: column;
display: flex;
flex: 1;
overflow-y: auto;
padding: 10px;
overflow-y: auto;
border-bottom: 1px solid #ccc;
background-color: #ffffff;
}
.message {
margin: 5px 0; /* 间隔 */
padding: 10px 15px; /* 内边距 */
border-radius: 15px; /* 圆角 */
max-width: 80%; /* 限制最大宽度 */
min-width: 80px; /* 设置最小宽度 */
min-height: 40px; /* 设置最小高度 */
word-wrap: break-word; /* 自动换行 */
position: relative; /* 时间戳定位 */
display: inline-block; /* 内容自适应宽度 */
box-sizing: border-box; /* 包括内边距和边框 */
flex-shrink: 0; /* 禁止高度被压缩 */
word-wrap: break-word; /* 自动换行,防止单行过长 */
white-space: normal; /* 允许正常换行 */
overflow: hidden;
}
.bot {
background-color: #f1f1f1; /* 灰色背景 */
color: black; /* 黑色字体 */
align-self: flex-start; /* 左对齐 */
margin-right: auto; /* 确保消息靠左 */
text-align: left; /* 内容左对齐 */
}
.user {
background-color: #2bc840; /* 蓝色背景 */
align-self: flex-end; /* 右对齐 */
margin-left: auto; /* 确保消息靠右 */
text-align: left; /* 内容左对齐 */
}
.timestamp {
font-size: 0.8em; /* 时间戳字体大小 */
color: rgba(0, 0, 0, 0.5); /* 半透明黑色 */
margin-bottom: 5px; /* 时间戳下方间距 */
display: block; /* 时间戳独占一行 */
}
#input-container {
display: flex;
padding: 10px;
background-color: #ffffff;
border-top: 1px solid #ccc;
}
#input {
flex: 1;
padding: 10px;
border: 1px solid #ccc;
border-radius: 5px;
margin-right: 10px;
}
#send {
padding: 10px;
border: none;
background-color: #007bff;
color: white;
border-radius: 5px;
cursor: pointer;
}
#send:hover {
background-color: #0056b3;
}
</style>
</head>
<body>
<div id="chat-container">
<div id="messages"></div>
<div id="input-container">
<input type="text" id="input" placeholder="输入消息..." />
<button id="send">发送</button>
</div>
</div>
<script>
const messagesDiv = document.getElementById('messages');
const input = document.getElementById('input');
const sendButton = document.getElementById('send');
// 生成唯一的 user_id
const userId = 'user_' + Math.random().toString(36).substr(2, 9);
// 连接 SSE
const eventSource = new EventSource(`/sse/${userId}`);
eventSource.onmessage = function(event) {
const message = JSON.parse(event.data);
const messageDiv = document.createElement('div');
messageDiv.className = 'message bot';
const timestamp = new Date(message.timestamp).toLocaleTimeString(); // 假设消息中有时间戳
messageDiv.innerHTML = `<div class="timestamp">${timestamp}</div>${message.content}`; // 显示时间
messagesDiv.appendChild(messageDiv);
messagesDiv.scrollTop = messagesDiv.scrollHeight; // 滚动到底部
};
sendButton.onclick = function() {
sendMessage();
};
input.addEventListener('keypress', function(event) {
if (event.key === 'Enter') {
sendMessage();
event.preventDefault(); // 防止换行
}
});
function sendMessage() {
const userMessage = input.value;
if (userMessage) {
const timestamp = new Date().toISOString(); // 获取当前时间戳
fetch('/message', {
method: 'POST',
headers: {
'Content-Type': 'application/json'
},
body: JSON.stringify({ user_id: userId, message: userMessage, timestamp: timestamp }) // 发送时间戳
});
const messageDiv = document.createElement('div');
messageDiv.className = 'message user';
const userTimestamp = new Date().toLocaleTimeString(); // 获取当前时间
messageDiv.innerHTML = `<div class="timestamp">${userTimestamp}</div>${userMessage}`; // 显示时间
messagesDiv.appendChild(messageDiv);
messagesDiv.scrollTop = messagesDiv.scrollHeight; // 滚动到底部
input.value = ''; // 清空输入框
}
}
</script>
</body>
</html>
+204
View File
@@ -0,0 +1,204 @@
import sys
import time
import web
import json
from queue import Queue
from bridge.context import *
from bridge.reply import Reply, ReplyType
from channel.chat_channel import ChatChannel, check_prefix
from channel.chat_message import ChatMessage
from common.log import logger
from common.singleton import singleton
from config import conf
import os
class WebMessage(ChatMessage):
    """Minimal ChatMessage carrying one text message from the web channel.

    NOTE(review): ChatMessage.__init__ is not invoked; presumably the base class
    tolerates attribute-only construction — confirm against channel/chat_message.py.
    """

    def __init__(
        self,
        msg_id,
        content,
        ctype=ContextType.TEXT,
        from_user_id="User",
        to_user_id="Chatgpt",
        other_user_id="Chatgpt",
    ):
        self.msg_id = msg_id  # unique message id
        self.ctype = ctype  # content type, defaults to TEXT
        self.content = content  # the message text
        self.from_user_id = from_user_id  # sender (the web user)
        self.to_user_id = to_user_id  # receiver (the bot)
        self.other_user_id = other_user_id  # peer id used for session routing
@singleton
class WebChannel(ChatChannel):
NOT_SUPPORT_REPLYTYPE = [ReplyType.VOICE]
_instance = None
# def __new__(cls):
# if cls._instance is None:
# cls._instance = super(WebChannel, cls).__new__(cls)
# return cls._instance
def __init__(self):
super().__init__()
self.message_queues = {} # 为每个用户存储一个消息队列
self.msg_id_counter = 0 # 添加消息ID计数器
def _generate_msg_id(self):
"""生成唯一的消息ID"""
self.msg_id_counter += 1
return str(int(time.time())) + str(self.msg_id_counter)
def send(self, reply: Reply, context: Context):
try:
if reply.type == ReplyType.IMAGE:
from PIL import Image
image_storage = reply.content
image_storage.seek(0)
img = Image.open(image_storage)
print("<IMAGE>")
img.show()
elif reply.type == ReplyType.IMAGE_URL:
import io
import requests
from PIL import Image
img_url = reply.content
pic_res = requests.get(img_url, stream=True)
image_storage = io.BytesIO()
for block in pic_res.iter_content(1024):
image_storage.write(block)
image_storage.seek(0)
img = Image.open(image_storage)
print(img_url)
img.show()
else:
print(reply.content)
# 获取用户ID,如果没有则使用默认值
# user_id = getattr(context.get("session", None), "session_id", "default_user")
user_id = context["receiver"]
# 确保用户有对应的消息队列
if user_id not in self.message_queues:
self.message_queues[user_id] = Queue()
# 将消息放入对应用户的队列
message_data = {
"type": str(reply.type),
"content": reply.content,
"timestamp": time.time()
}
self.message_queues[user_id].put(message_data)
logger.debug(f"Message queued for user {user_id}")
except Exception as e:
logger.error(f"Error in send method: {e}")
raise
def sse_handler(self, user_id):
"""
Handle Server-Sent Events (SSE) for real-time communication.
"""
web.header('Content-Type', 'text/event-stream')
web.header('Cache-Control', 'no-cache')
web.header('Connection', 'keep-alive')
# 确保用户有消息队列
if user_id not in self.message_queues:
self.message_queues[user_id] = Queue()
try:
while True:
try:
# 发送心跳
yield f": heartbeat\n\n"
# 非阻塞方式获取消息
if not self.message_queues[user_id].empty():
message = self.message_queues[user_id].get_nowait()
yield f"data: {json.dumps(message)}\n\n"
time.sleep(0.5)
except Exception as e:
logger.error(f"SSE Error: {e}")
break
finally:
# 清理资源
if user_id in self.message_queues:
# 只有当队列为空时才删除
if self.message_queues[user_id].empty():
del self.message_queues[user_id]
def post_message(self):
"""
Handle incoming messages from users via POST request.
"""
try:
data = web.data() # 获取原始POST数据
json_data = json.loads(data)
user_id = json_data.get('user_id', 'default_user')
prompt = json_data.get('message', '')
except json.JSONDecodeError:
return json.dumps({"status": "error", "message": "Invalid JSON"})
except Exception as e:
return json.dumps({"status": "error", "message": str(e)})
if not prompt:
return json.dumps({"status": "error", "message": "No message provided"})
try:
msg_id = self._generate_msg_id()
context = self._compose_context(ContextType.TEXT, prompt, msg=WebMessage(msg_id,
prompt,
from_user_id=user_id,
other_user_id = user_id
))
context["isgroup"] = False
# context["session"] = web.storage(session_id=user_id)
if not context:
return json.dumps({"status": "error", "message": "Failed to process message"})
self.produce(context)
return json.dumps({"status": "success", "message": "Message received"})
except Exception as e:
logger.error(f"Error processing message: {e}")
return json.dumps({"status": "error", "message": "Internal server error"})
def chat_page(self):
"""Serve the chat HTML page."""
file_path = os.path.join(os.path.dirname(__file__), 'chat.html') # 使用绝对路径
with open(file_path, 'r', encoding='utf-8') as f:
return f.read()
def startup(self):
    """Start the web channel's HTTP server (blocks forever)."""
    # Quiet the logger so the console only shows the startup hint below.
    logger.setLevel("WARN")
    print("\nWeb Channel is running, please visit http://localhost:9899/chat")
    routes = (
        '/sse/(.+)', 'SSEHandler',    # SSE stream; the group captures the user id
        '/message', 'MessageHandler',
        '/chat', 'ChatHandler',
    )
    listen_port = conf().get("web_port", 9899)
    application = web.application(routes, globals(), autoreload=False)
    web.httpserver.runsimple(application.wsgifunc(), ("0.0.0.0", listen_port))
class SSEHandler:
    """web.py handler that delegates SSE GET requests to the channel."""

    def GET(self, user_id):
        channel = WebChannel()
        return channel.sse_handler(user_id)
class MessageHandler:
    """web.py handler that delegates message POSTs to the channel."""

    def POST(self):
        channel = WebChannel()
        return channel.post_message()
class ChatHandler:
    """web.py handler that serves the chat page."""

    def GET(self):
        channel = WebChannel()
        return channel.chat_page()
+179
View File
@@ -0,0 +1,179 @@
# encoding:utf-8
"""
wechat channel
"""
import io
import json
import os
import threading
import time
from queue import Empty
from typing import Any
from bridge.context import *
from bridge.reply import *
from channel.chat_channel import ChatChannel
from channel.wechat.wcf_message import WechatfMessage
from common.log import logger
from common.singleton import singleton
from common.utils import *
from config import conf, get_appdata_dir
from wcferry import Wcf, WxMsg
@singleton
class WechatfChannel(ChatChannel):
    """WeChat channel implemented on top of the wcferry protocol."""

    NOT_SUPPORT_REPLYTYPE = []

    def __init__(self):
        super().__init__()
        self.NOT_SUPPORT_REPLYTYPE = []
        # Recently seen message ids -> receive timestamp, used for dedup.
        self.received_msgs = {}
        # wcferry client; attaches to the running WeChat instance.
        self.wcf = Wcf()
        self.wxid = None  # set to the logged-in account's wxid on startup

    def startup(self):
        """Attach to WeChat, load the contact cache and start the receive loop."""
        try:
            # wcferry launches WeChat and completes login automatically.
            self.wxid = self.wcf.get_self_wxid()
            self.name = self.wcf.get_user_info().get("name")
            logger.info(f"微信登录成功,当前用户ID: {self.wxid}, 用户名:{self.name}")
            self.contact_cache = ContactCache(self.wcf)
            self.contact_cache.update()
            # Enable receiving and spin up the consumer thread.
            self.wcf.enable_receiving_msg()
            worker = threading.Thread(target=self._process_messages, name="WeChatThread", daemon=True)
            worker.start()
        except Exception as e:
            logger.error(f"微信通道启动失败: {e}")
            raise e

    def _process_messages(self):
        """Consume messages from wcferry's queue forever."""
        while True:
            try:
                msg = self.wcf.get_msg()
                if msg:
                    self._handle_message(msg)
            except Empty:
                continue
            except Exception as e:
                logger.error(f"处理消息失败: {e}")
                continue

    def _handle_message(self, msg: WxMsg):
        """Wrap, dedupe and dispatch a single raw message."""
        try:
            cmsg = WechatfMessage(self, msg)
            if cmsg.msg_id in self.received_msgs:
                return  # duplicate delivery, already handled
            self.received_msgs[cmsg.msg_id] = time.time()
            # Prune stale dedup entries on every message.
            self._clean_expired_msgs()
            logger.debug(f"收到消息: {msg}")
            context = self._compose_context(cmsg.ctype, cmsg.content,
                                            isgroup=cmsg.is_group, msg=cmsg)
            if context:
                self.produce(context)
        except Exception as e:
            logger.error(f"处理消息失败: {e}")

    def _clean_expired_msgs(self, expire_time: float = 60):
        """Drop dedup entries older than ``expire_time`` seconds."""
        now = time.time()
        expired = [mid for mid, ts in self.received_msgs.items() if now - ts > expire_time]
        for mid in expired:
            del self.received_msgs[mid]

    def send(self, reply: Reply, context: Context):
        """Deliver a reply to the receiver recorded in the context."""
        receiver = context["receiver"]
        if not receiver:
            logger.error("receiver is empty")
            return
        try:
            if reply.type == ReplyType.TEXT:
                # @-mention the original speaker in group chats.
                at_list = []
                if context.get("isgroup") and context["msg"].actual_user_id:
                    at_list = [context["msg"].actual_user_id]
                at_str = ",".join(at_list) if at_list else ""
                self.wcf.send_text(reply.content, receiver, at_str)
            elif reply.type in (ReplyType.ERROR, ReplyType.INFO):
                self.wcf.send_text(reply.content, receiver)
            else:
                logger.error(f"暂不支持的消息类型: {reply.type}")
        except Exception as e:
            logger.error(f"发送消息失败: {e}")

    def close(self):
        """Detach the wcferry client."""
        try:
            self.wcf.cleanup()
        except Exception as e:
            logger.error(f"关闭通道失败: {e}")
class ContactCache:
    """In-memory cache of WeChat contacts keyed by wxid."""

    def __init__(self, wcf):
        """
        :param wcf: a wcferry.client.Wcf instance
        """
        self.wcf = wcf
        self._contact_map = {}  # {wxid: full contact dict}

    def update(self):
        """
        Refresh the cache: trigger ``get_contacts()`` on the client, then
        rebuild the wxid -> contact mapping from ``wcf.contacts``.
        """
        self.wcf.get_contacts()
        self._contact_map.clear()
        self._contact_map.update(
            (entry['wxid'], entry)
            for entry in self.wcf.contacts
            if entry.get('wxid')  # skip records without a wxid field
        )

    def get_contact(self, wxid: str) -> dict:
        """Return the full contact dict for ``wxid``, or None if unknown."""
        return self._contact_map.get(wxid)

    def get_name_by_wxid(self, wxid: str) -> str:
        """Return the member/group name for ``wxid``, or '' if unknown."""
        contact = self.get_contact(wxid)
        return contact.get('name', '') if contact else ''
+58
View File
@@ -0,0 +1,58 @@
# encoding:utf-8
"""
wechat channel message
"""
from bridge.context import ContextType
from channel.chat_message import ChatMessage
from common.log import logger
from wcferry import WxMsg
class WechatfMessage(ChatMessage):
    """Wraps a raw wcferry ``WxMsg`` into the project's ChatMessage interface."""

    def __init__(self, channel, wcf_msg: WxMsg, is_group=False):
        """
        :param channel: the channel that received the message (provides wxid,
            name, wcf client and contact cache)
        :param wcf_msg: raw wcferry message object
        :param is_group: whether this is a group-chat message
        :raises NotImplementedError: for non-text message types
        """
        super().__init__(wcf_msg)
        self.msg_id = wcf_msg.id
        self.create_time = wcf_msg.ts  # message timestamp from wcferry
        # NOTE(review): relies on the private WxMsg._is_group flag — confirm
        # it still exists in the pinned wcferry version.
        self.is_group = is_group or wcf_msg._is_group
        self.wxid = channel.wxid
        self.name = channel.name

        # Only plain text messages are supported for now.
        if wcf_msg.is_text():
            self.ctype = ContextType.TEXT
            self.content = wcf_msg.content
        else:
            raise NotImplementedError(f"Unsupported message type: {wcf_msg.type}")

        # Sender / receiver bookkeeping.
        sent_by_self = wcf_msg.sender == self.wxid
        self.from_user_id = self.wxid if sent_by_self else wcf_msg.sender
        self.from_user_nickname = (
            self.name if sent_by_self
            else channel.contact_cache.get_name_by_wxid(wcf_msg.sender)
        )
        self.to_user_id = self.wxid
        self.to_user_nickname = self.name
        self.other_user_id = wcf_msg.sender
        self.other_user_nickname = channel.contact_cache.get_name_by_wxid(wcf_msg.sender)

        if self.is_group:
            # In group chats the "other user" is the room itself.
            self.other_user_id = wcf_msg.roomid
            self.other_user_nickname = channel.contact_cache.get_name_by_wxid(wcf_msg.roomid)
            self.actual_user_id = wcf_msg.sender
            self.actual_user_nickname = channel.wcf.get_alias_in_chatroom(wcf_msg.sender, wcf_msg.roomid)
            if not self.actual_user_nickname:
                # Room alias can be unavailable (e.g. WeCom members);
                # fall back to the contact cache.
                self.actual_user_nickname = channel.contact_cache.get_name_by_wxid(wcf_msg.sender)
            self.room_id = wcf_msg.roomid
            self.is_at = wcf_msg.is_at(self.wxid)  # was the logged-in user @-mentioned

        # Whether the logged-in account itself sent this message.
        self.my_msg = wcf_msg.from_self()
+29 -17
View File
@@ -117,23 +117,35 @@ class WechatChannel(ChatChannel):
def startup(self):
try:
itchat.instance.receivingRetryCount = 600 # 修改断线超时时间
# login by scan QRCode
hotReload = conf().get("hot_reload", False)
status_path = os.path.join(get_appdata_dir(), "itchat.pkl")
itchat.auto_login(
enableCmdQR=2,
hotReload=hotReload,
statusStorageDir=status_path,
qrCallback=qrCallback,
exitCallback=self.exitCallback,
loginCallback=self.loginCallback
)
self.user_id = itchat.instance.storageClass.userName
self.name = itchat.instance.storageClass.nickName
logger.info("Wechat login success, user_id: {}, nickname: {}".format(self.user_id, self.name))
# start message listener
itchat.run()
time.sleep(3)
logger.error("""[WechatChannel] 当前channel暂不可用,目前支持的channel有:
1. terminal: 终端
2. wechatmp: 个人公众号
3. wechatmp_service: 企业公众号
4. wechatcom_app: 企微自建应用
5. dingtalk: 钉钉
6. feishu: 飞书
7. web: 网页
8. wcf: wechat (需Windows环境,参考 https://github.com/zhayujie/chatgpt-on-wechat/pull/2562 )
可修改 config.json 配置文件的 channel_type 字段进行切换""")
# itchat.instance.receivingRetryCount = 600 # 修改断线超时时间
# # login by scan QRCode
# hotReload = conf().get("hot_reload", False)
# status_path = os.path.join(get_appdata_dir(), "itchat.pkl")
# itchat.auto_login(
# enableCmdQR=2,
# hotReload=hotReload,
# statusStorageDir=status_path,
# qrCallback=qrCallback,
# exitCallback=self.exitCallback,
# loginCallback=self.loginCallback
# )
# self.user_id = itchat.instance.storageClass.userName
# self.name = itchat.instance.storageClass.nickName
# logger.info("Wechat login success, user_id: {}, nickname: {}".format(self.user_id, self.name))
# # start message listener
# itchat.run()
except Exception as e:
logger.exception(e)
+19 -5
View File
@@ -15,7 +15,7 @@ GEMINI = "gemini" # gemini-1.0-pro
ZHIPU_AI = "glm-4"
MOONSHOT = "moonshot"
MiniMax = "minimax"
MODELSCOPE = "modelscope"
# model
CLAUDE3 = "claude-3-opus-20240229"
@@ -59,6 +59,8 @@ LINKAI_4o = "linkai-4o"
GEMINI_PRO = "gemini-1.0-pro"
GEMINI_15_flash = "gemini-1.5-flash"
GEMINI_15_PRO = "gemini-1.5-pro"
GEMINI_20_flash_exp = "gemini-2.0-flash-exp"
GLM_4 = "glm-4"
GLM_4_PLUS = "glm-4-plus"
@@ -72,14 +74,23 @@ GLM_4_AIRX = "glm-4-airx"
CLAUDE_3_OPUS = "claude-3-opus-latest"
CLAUDE_3_OPUS_0229 = "claude-3-opus-20240229"
CLAUDE_35_SONNET = "claude-3-5-sonnet-latest" # 带 latest 标签的模型名称,会不断更新指向最新发布的模型
CLAUDE_35_SONNET_1022 = "claude-3-5-sonnet-20241022" # 带具体日期的模型名称,会固定为该日期发布的模型
CLAUDE_35_SONNET_0620 = "claude-3-5-sonnet-20240620"
CLAUDE_3_SONNET = "claude-3-sonnet-20240229"
CLAUDE_3_HAIKU = "claude-3-haiku-20240307"
DEEPSEEK_CHAT = "deepseek-chat" # DeepSeek-V3对话模型
DEEPSEEK_REASONER = "deepseek-reasoner" # DeepSeek-R1模型
GITEE_AI_MODEL_LIST = ["Yi-34B-Chat", "InternVL2-8B", "deepseek-coder-33B-instruct", "InternVL2.5-26B", "Qwen2-VL-72B", "Qwen2.5-32B-Instruct", "glm-4-9b-chat", "codegeex4-all-9b", "Qwen2.5-Coder-32B-Instruct", "Qwen2.5-72B-Instruct", "Qwen2.5-7B-Instruct", "Qwen2-72B-Instruct", "Qwen2-7B-Instruct", "code-raccoon-v1", "Qwen2.5-14B-Instruct"]
MODELSCOPE_MODEL_LIST = ["LLM-Research/c4ai-command-r-plus-08-2024","mistralai/Mistral-Small-Instruct-2409","mistralai/Ministral-8B-Instruct-2410","mistralai/Mistral-Large-Instruct-2407",
"Qwen/Qwen2.5-Coder-32B-Instruct","Qwen/Qwen2.5-Coder-14B-Instruct","Qwen/Qwen2.5-Coder-7B-Instruct","Qwen/Qwen2.5-72B-Instruct","Qwen/Qwen2.5-32B-Instruct","Qwen/Qwen2.5-14B-Instruct","Qwen/Qwen2.5-7B-Instruct","Qwen/QwQ-32B-Preview",
"LLM-Research/Llama-3.3-70B-Instruct","opencompass/CompassJudger-1-32B-Instruct","Qwen/QVQ-72B-Preview","LLM-Research/Meta-Llama-3.1-405B-Instruct","LLM-Research/Meta-Llama-3.1-8B-Instruct","Qwen/Qwen2-VL-7B-Instruct","LLM-Research/Meta-Llama-3.1-70B-Instruct",
"Qwen/Qwen2.5-14B-Instruct-1M","Qwen/Qwen2.5-7B-Instruct-1M","Qwen/Qwen2.5-VL-3B-Instruct","Qwen/Qwen2.5-VL-7B-Instruct","Qwen/Qwen2.5-VL-72B-Instruct","deepseek-ai/DeepSeek-R1-Distill-Llama-70B","deepseek-ai/DeepSeek-R1-Distill-Llama-8B","deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
"deepseek-ai/DeepSeek-R1-Distill-Qwen-14B","deepseek-ai/DeepSeek-R1-Distill-Qwen-7B","deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B","deepseek-ai/DeepSeek-R1","deepseek-ai/DeepSeek-V3","Qwen/QwQ-32B"]
MODEL_LIST = [
GPT35, GPT35_0125, GPT35_1106, "gpt-3.5-turbo-16k",
O1, O1_MINI, GPT_4o, GPT_4O_0806, GPT_4o_MINI, GPT4_TURBO, GPT4_TURBO_PREVIEW, GPT4_TURBO_01_25, GPT4_TURBO_11_06, GPT4, GPT4_32k, GPT4_06_13, GPT4_32k_06_13,
@@ -87,13 +98,16 @@ MODEL_LIST = [
XUNFEI,
ZHIPU_AI, GLM_4, GLM_4_PLUS, GLM_4_flash, GLM_4_LONG, GLM_4_ALLTOOLS, GLM_4_0520, GLM_4_AIR, GLM_4_AIRX,
MOONSHOT, MiniMax,
GEMINI, GEMINI_PRO, GEMINI_15_flash, GEMINI_15_PRO,
GEMINI, GEMINI_PRO, GEMINI_15_flash, GEMINI_15_PRO,GEMINI_20_flash_exp,
CLAUDE_3_OPUS, CLAUDE_3_OPUS_0229, CLAUDE_35_SONNET, CLAUDE_35_SONNET_1022, CLAUDE_35_SONNET_0620, CLAUDE_3_SONNET, CLAUDE_3_HAIKU, "claude", "claude-3-haiku", "claude-3-sonnet", "claude-3-opus", "claude-3.5-sonnet",
"moonshot-v1-8k", "moonshot-v1-32k", "moonshot-v1-128k",
QWEN, QWEN_TURBO, QWEN_PLUS, QWEN_MAX,
LINKAI_35, LINKAI_4_TURBO, LINKAI_4o
LINKAI_35, LINKAI_4_TURBO, LINKAI_4o,
DEEPSEEK_CHAT, DEEPSEEK_REASONER,
MODELSCOPE
]
MODEL_LIST = MODEL_LIST + GITEE_AI_MODEL_LIST + MODELSCOPE_MODEL_LIST
# channel
FEISHU = "feishu"
DINGTALK = "dingtalk"
+4 -4
View File
@@ -2,7 +2,7 @@ from bridge.context import Context, ContextType
from bridge.reply import Reply, ReplyType
from common.log import logger
from linkai import LinkAIClient, PushMsg
from config import conf, pconf, plugin_config, available_setting
from config import conf, pconf, plugin_config, available_setting, write_plugin_config
from plugins import PluginManager
import time
@@ -51,10 +51,10 @@ class ChatClient(LinkAIClient):
local_config["voice_reply_voice"] = False
if config.get("admin_password"):
if not plugin_config.get("Godcmd"):
plugin_config["Godcmd"] = {"password": config.get("admin_password"), "admin_users": []}
if not pconf("Godcmd"):
write_plugin_config({"Godcmd": {"password": config.get("admin_password"), "admin_users": []} })
else:
plugin_config["Godcmd"]["password"] = config.get("admin_password")
pconf("Godcmd")["password"] = config.get("admin_password")
PluginManager().instances["GODCMD"].reload()
if config.get("group_app_map") and pconf("linkai"):
+12
View File
@@ -171,6 +171,9 @@ available_setting = {
"zhipu_ai_api_base": "https://open.bigmodel.cn/api/paas/v4",
"moonshot_api_key": "",
"moonshot_base_url": "https://api.moonshot.cn/v1/chat/completions",
#魔搭社区 平台配置
"modelscope_api_key": "",
"modelscope_base_url": "https://api-inference.modelscope.cn/v1/chat/completions",
# LinkAI平台配置
"use_linkai": False,
"linkai_api_key": "",
@@ -179,6 +182,7 @@ available_setting = {
"Minimax_api_key": "",
"Minimax_group_id": "",
"Minimax_base_url": "",
"web_port": 9899,
}
@@ -341,6 +345,14 @@ def write_plugin_config(pconf: dict):
for k in pconf:
plugin_config[k.lower()] = pconf[k]
def remove_plugin_config(name: str):
"""
移除待重新加载的插件全局配置
:param name: 待重载的插件名
"""
global plugin_config
plugin_config.pop(name.lower(), None)
def pconf(plugin_name: str) -> dict:
"""
+3 -2
View File
@@ -339,7 +339,8 @@ class Godcmd(Plugin):
ok, result = True, "配置已重载"
elif cmd == "resetall":
if bottype in [const.OPEN_AI, const.CHATGPT, const.CHATGPTONAZURE, const.LINKAI,
const.BAIDU, const.XUNFEI, const.QWEN, const.GEMINI, const.ZHIPU_AI, const.MOONSHOT]:
const.BAIDU, const.XUNFEI, const.QWEN, const.GEMINI, const.ZHIPU_AI, const.MOONSHOT,
const.MODELSCOPE]:
channel.cancel_all_session()
bot.sessions.clear_all_session()
ok, result = True, "重置所有会话成功"
@@ -477,7 +478,7 @@ class Godcmd(Plugin):
return model
def reload(self):
gconf = plugin_config[self.name]
gconf = pconf(self.name)
if gconf:
if gconf.get("password"):
self.password = gconf["password"]
+5 -3
View File
@@ -201,12 +201,14 @@ class LinkAI(Plugin):
group_name = context.get("msg").from_user_nickname
app_code = self._fetch_group_app_code(group_name)
if app_code:
remote_enabled = Util.fetch_app_plugin(app_code, "内容总结")
if context.type.name in ["FILE", "SHARING"]:
remote_enabled = Util.fetch_app_plugin(app_code, "内容总结")
else:
# 非群聊场景使用全局app_code
app_code = conf().get("linkai_app_code")
if app_code:
remote_enabled = Util.fetch_app_plugin(app_code, "内容总结")
if context.type.name in ["FILE", "SHARING"]:
remote_enabled = Util.fetch_app_plugin(app_code, "内容总结")
# 基础条件:总开关开启且消息类型符合要求
base_enabled = (
@@ -289,7 +291,7 @@ class LinkAI(Plugin):
plugin_conf = json.load(f)
plugin_conf["midjourney"]["enabled"] = False
plugin_conf["summary"]["enabled"] = False
plugin_config["linkai"] = plugin_conf
write_plugin_config({"linkai": plugin_conf})
return plugin_conf
except Exception as e:
logger.exception(e)
+7 -3
View File
@@ -12,11 +12,14 @@ class LinkSummary:
def summary_file(self, file_path: str, app_code: str):
file_body = {
"file": open(file_path, "rb"),
"name": file_path.split("/")[-1],
"name": file_path.split("/")[-1]
}
body = {
"app_code": app_code
}
url = self.base_url() + "/v1/summary/file"
res = requests.post(url, headers=self.headers(), files=file_body, timeout=(5, 300))
logger.info(f"[LinkSum] file summary, app_code={app_code}")
res = requests.post(url, headers=self.headers(), files=file_body, data=body, timeout=(5, 300))
return self._parse_summary_res(res)
def summary_url(self, url: str, app_code: str):
@@ -25,6 +28,7 @@ class LinkSummary:
"url": url,
"app_code": app_code
}
logger.info(f"[LinkSum] url summary, app_code={app_code}")
res = requests.post(url=self.base_url() + "/v1/summary/url", headers=self.headers(), json=body, timeout=(5, 180))
return self._parse_summary_res(res)
@@ -50,7 +54,7 @@ class LinkSummary:
def _parse_summary_res(self, res):
if res.status_code == 200:
res = res.json()
logger.debug(f"[LinkSum] url summary, res={res}")
logger.debug(f"[LinkSum] summary result, res={res}")
if res.get("code") == 200:
data = res.get("data")
return {
+16 -13
View File
@@ -31,17 +31,20 @@ class Util:
@staticmethod
def fetch_app_plugin(app_code: str, plugin_name: str) -> bool:
headers = {"Authorization": "Bearer " + conf().get("linkai_api_key")}
# do http request
base_url = conf().get("linkai_api_base", "https://api.link-ai.tech")
params = {"app_code": app_code}
res = requests.get(url=base_url + "/v1/app/info", params=params, headers=headers, timeout=(5, 10))
if res.status_code == 200:
plugins = res.json().get("data").get("plugins")
for plugin in plugins:
if plugin.get("name") and plugin.get("name") == plugin_name:
return True
return False
else:
logger.warning(f"[LinkAI] find app info exception, res={res}")
try:
headers = {"Authorization": "Bearer " + conf().get("linkai_api_key")}
# do http request
base_url = conf().get("linkai_api_base", "https://api.link-ai.tech")
params = {"app_code": app_code}
res = requests.get(url=base_url + "/v1/app/info", params=params, headers=headers, timeout=(5, 10))
if res.status_code == 200:
plugins = res.json().get("data").get("plugins")
for plugin in plugins:
if plugin.get("name") and plugin.get("name") == plugin_name:
return True
return False
else:
logger.warning(f"[LinkAI] find app info exception, res={res}")
return False
except Exception as e:
return False
+3 -3
View File
@@ -1,6 +1,6 @@
import os
import json
from config import pconf, plugin_config, conf
from config import pconf, plugin_config, conf, write_plugin_config
from common.log import logger
@@ -24,13 +24,13 @@ class Plugin:
plugin_conf = json.load(f)
# 写入全局配置内存
plugin_config[self.name] = plugin_conf
write_plugin_config({self.name: plugin_conf})
logger.debug(f"loading plugin config, plugin_name={self.name}, conf={plugin_conf}")
return plugin_conf
def save_config(self, config: dict):
try:
plugin_config[self.name] = config
write_plugin_config({self.name: config})
# 写入全局配置
global_config_path = "./plugins/config.json"
if os.path.exists(global_config_path):
+6 -1
View File
@@ -9,7 +9,7 @@ import sys
from common.log import logger
from common.singleton import singleton
from common.sorted_dict import SortedDict
from config import conf, write_plugin_config
from config import conf, remove_plugin_config, write_plugin_config
from .event import *
@@ -151,6 +151,8 @@ class PluginManager:
self.disable_plugin(name)
failed_plugins.append(name)
continue
if name in self.instances:
self.instances[name].handlers.clear()
self.instances[name] = instance
for event in instance.handlers:
if event not in self.listening_plugins:
@@ -161,10 +163,13 @@ class PluginManager:
def reload_plugin(self, name: str):
name = name.upper()
remove_plugin_config(name)
if name in self.instances:
for event in self.listening_plugins:
if name in self.listening_plugins[event]:
self.listening_plugins[event].remove(name)
if name in self.instances:
self.instances[name].handlers.clear()
del self.instances[name]
self.activate_plugins()
return True
+2 -1
View File
@@ -99,7 +99,7 @@ class Role(Plugin):
if e_context["context"].type != ContextType.TEXT:
return
btype = Bridge().get_bot_type("chat")
if btype not in [const.OPEN_AI, const.CHATGPT, const.CHATGPTONAZURE, const.QWEN_DASHSCOPE, const.XUNFEI, const.BAIDU, const.ZHIPU_AI, const.MOONSHOT, const.MiniMax, const.LINKAI]:
if btype not in [const.OPEN_AI, const.CHATGPT, const.CHATGPTONAZURE, const.QWEN_DASHSCOPE, const.XUNFEI, const.BAIDU, const.ZHIPU_AI, const.MOONSHOT, const.MiniMax, const.LINKAI,const.MODELSCOPE]:
logger.debug(f'不支持的bot: {btype}')
return
bot = Bridge().get_bot("chat")
@@ -180,6 +180,7 @@ class Role(Plugin):
e_context["reply"] = reply
e_context.action = EventAction.BREAK_PASS
else:
e_context["context"]["generate_breaked_by"] = EventAction.BREAK
prompt = self.roleplays[sessionid].action(content)
e_context["context"].type = ContextType.TEXT
e_context["context"].content = prompt
-4
View File
@@ -12,10 +12,6 @@
"url": "https://github.com/lanvent/plugin_summary.git",
"desc": "总结聊天记录的插件"
},
"timetask": {
"url": "https://github.com/haikerapples/timetask.git",
"desc": "一款定时任务系统的插件"
},
"Apilot": {
"url": "https://github.com/6vision/Apilot.git",
"desc": "通过api直接查询早报、热榜、快递、天气等实用信息的插件"
+3
View File
@@ -44,3 +44,6 @@ zhipuai>=2.0.1
# tongyi qwen new sdk
dashscope
# tencentcloud sdk
tencentcloud-sdk-python>=3.0.0
+1
View File
@@ -8,3 +8,4 @@ Pillow
pre-commit
web.py
linkai>=0.0.6.0
+4
View File
@@ -50,4 +50,8 @@ def create_voice(voice_type):
from voice.xunfei.xunfei_voice import XunfeiVoice
return XunfeiVoice()
elif voice_type == "tencent":
from voice.tencent.tencent_voice import TencentVoice
return TencentVoice()
raise RuntimeError
+5
View File
@@ -0,0 +1,5 @@
{
"voice_type": 1003,
"secret_id": "YOUR_SECRET_ID",
"secret_key": "YOUR_SECRET_KEY"
}
+119
View File
@@ -0,0 +1,119 @@
import json
import base64
import os
import time
from voice.voice import Voice
from common.log import logger
from tencentcloud.common import credential
from tencentcloud.asr.v20190614 import asr_client, models as asr_models
from tencentcloud.tts.v20190823 import tts_client, models as tts_models
from bridge.reply import Reply, ReplyType
from common.tmp_dir import TmpDir
class TencentVoice(Voice):
    """Tencent Cloud speech service: ASR (voice->text) and TTS (text->voice)."""

    def __init__(self):
        super().__init__()
        self.secret_id = None
        self.secret_key = None
        self.voice_type = 1003  # default timbre id (customer-service female voice)
        self._load_config()

    def _load_config(self):
        """Load credentials and voice type from the module-local config.json."""
        try:
            config_path = os.path.join(os.path.dirname(__file__), 'config.json')
            # BUGFIX: read as UTF-8 explicitly — the default encoding is
            # locale-dependent and breaks on non-UTF-8 Windows locales.
            with open(config_path, 'r', encoding='utf-8') as f:
                config = json.load(f)
            self.secret_id = config.get('secret_id')
            self.secret_key = config.get('secret_key')
            self.voice_type = config.get('voice_type', self.voice_type)
            if not self.secret_id or not self.secret_key:
                logger.error("[Tencent] Missing credentials in config.json")
        except Exception as e:
            logger.error(f"[Tencent] Failed to load config: {e}")

    def setup(self, config):
        """No-op; kept for backward compatibility with the Voice interface."""
        pass

    def voiceToText(self, voice_file):
        """
        Recognize speech in a local audio file.

        :param voice_file: path to a WAV audio file
        :return: Reply(TEXT) with the transcript, or Reply(ERROR) on failure
        """
        try:
            cred = credential.Credential(self.secret_id, self.secret_key)
            client = asr_client.AsrClient(cred, "ap-guangzhou")
            with open(voice_file, 'rb') as f:
                audio_data = f.read()
            # The API expects the audio payload base64-encoded in the request.
            base64_audio = base64.b64encode(audio_data).decode('utf-8')
            req = asr_models.SentenceRecognitionRequest()
            req.ProjectId = 0
            req.SubServiceType = 2
            req.EngSerViceType = "16k_zh"  # 16 kHz Mandarin engine
            req.SourceType = 1             # audio carried inline in the request
            req.VoiceFormat = "wav"
            req.UsrAudioKey = "voice_recognition"
            req.Data = base64_audio
            resp = client.SentenceRecognition(req)
            if resp.Result:
                logger.info("[Tencent] Voice to text success: {}".format(resp.Result))
                return Reply(ReplyType.TEXT, resp.Result)
            else:
                logger.warning("[Tencent] Voice to text failed")
                return Reply(ReplyType.ERROR, "腾讯语音识别失败")
        except Exception as e:
            logger.error("[Tencent] Voice to text error: {}".format(e))
            return Reply(ReplyType.ERROR, "腾讯语音识别出错:{}".format(str(e)))

    def textToVoice(self, text):
        """
        Synthesize speech for the given text and save it as an mp3 file.

        :param text: text to synthesize
        :return: Reply(VOICE) with the mp3 path, or Reply(ERROR) on failure
        """
        try:
            cred = credential.Credential(self.secret_id, self.secret_key)
            client = tts_client.TtsClient(cred, "ap-guangzhou")
            req = tts_models.TextToVoiceRequest()
            req.Text = text
            req.SessionId = str(int(time.time()))
            req.Volume = 5
            req.Speed = 0
            req.ProjectId = 0
            req.ModelType = 1
            req.PrimaryLanguage = 1  # 1 = Chinese
            req.SampleRate = 16000
            req.VoiceType = self.voice_type  # timbre selected in config.json
            response = client.TextToVoice(req)
            if response.Audio:
                # Unique-ish file name: timestamp + non-negative text hash.
                fileName = TmpDir().path() + "reply-" + str(int(time.time())) + "-" + str(hash(text) & 0x7FFFFFFF) + ".mp3"
                with open(fileName, "wb") as f:
                    f.write(base64.b64decode(response.Audio))
                logger.info("[Tencent] textToVoice text={} voice file name={}".format(text, fileName))
                return Reply(ReplyType.VOICE, fileName)
            else:
                logger.error("[Tencent] textToVoice failed")
                return Reply(ReplyType.ERROR, "腾讯语音合成失败")
        except Exception as e:
            logger.error("[Tencent] Text to voice error: {}".format(e))
            return Reply(ReplyType.ERROR, "腾讯语音合成出错:{}".format(str(e)))