Compare commits

...

75 Commits

Author SHA1 Message Date
Saboteur7 16324e7283 Merge pull request #2407 from ayasa520/fix_reloadp
fix(plugin): fix reloadp command not taking effect
2024-12-13 15:39:33 +08:00
Saboteur7 9f7e2e1572 Merge pull request #2413 from ayasa520/fix-scanp
fix: Memory leak caused by scanp command due to handler's reference of plugin instance
2024-12-13 14:57:22 +08:00
vision 857ce1d530 Merge pull request #2398 from stonyz/web-channel
增加web channel
2024-12-13 11:45:01 +08:00
vision be0d72775d Merge pull request #2423 from 6vision/reedme_update_docker_deploy
update readme
2024-12-13 11:41:17 +08:00
vision 7832a2495b Merge pull request #2422 from printlndarling/master
add: add gemini-2.0-flash-exp model
2024-12-13 11:35:26 +08:00
6vision 0506b7f735 update readme 2024-12-13 11:25:36 +08:00
繁星_逐梦 4c0b7942f0 add: gemini-2.0-flash-exp model 2024-12-12 22:22:14 +08:00
繁星_逐梦 651c840c4a add: gemini-2.0-flash-exp model 2024-12-12 22:19:13 +08:00
rikka 2a351ca415 fix(reloadp): clear handlers when reloading plugin to avoid memory leaks 2024-12-05 00:33:00 +08:00
rikka 49b7106d71 fix: Memory leak caused by scanp command due to handler's reference to plugin instance.
close #2412
2024-12-03 22:39:56 +08:00
zhayujie 8bf633f539 Merge pull request #2408 from 6vision/fix-summary-image
图像识别逻辑优化
2024-12-02 21:53:52 +08:00
6vision 0f8efcb4b0 图像识别逻辑优化 2024-12-02 21:16:59 +08:00
Rikka c567641c5c fix(plugin): fix reloadp command not taking effect
- Use write_plugin_config() instead of directly modifying plugin_config dict
- Add remove_plugin_config() to clear plugin config before reload
- Update plugins to use pconf() and write_plugin_config() for better config management
2024-12-02 16:38:21 +08:00
vision bdc3820382 Merge pull request #2405 from 6vision/role-plugin-linkai
Linkai bot is compatible with the role plugin.
2024-12-02 12:16:30 +08:00
6vision 33a69a7907 Linkai bot is compatible with the role plugin. 2024-12-02 12:13:26 +08:00
vision a4d0e9bbc3 Merge pull request #2401 from 6vision/plugins_source_update
插件列表更新
2024-11-29 11:09:27 +08:00
6vision afc753e1d2 插件列表更新 2024-11-29 11:07:16 +08:00
zhayujie e641a41224 Update README.md 2024-11-28 21:48:42 +08:00
vision 79305c0632 Merge pull request #2400 from 6vision/readme_update
readme update
2024-11-28 12:59:00 +08:00
6vision ef2ce3f09d 说明文档更新 2024-11-28 12:41:00 +08:00
Stony 71c18c04fc 增加web channel 2024-11-27 08:53:13 +08:00
Saboteur7 cf84e57f81 fix: add exception handling 2024-11-15 11:58:10 +08:00
vision 9421d44579 Merge pull request #2373 from 6vision/summary_app_code
Buy using app code, supports custom summary prompt .
2024-11-07 20:16:53 +08:00
6vision 5cd2ae8cc8 Summary supports app_code 2024-11-06 21:45:03 +08:00
vision 22d67b3a59 Merge pull request #2364 from 6vision/1031
1.7.3 release readme
2024-10-31 14:44:55 +08:00
6vision e102cbb8c4 1.7.3 release readme 2024-10-31 14:39:11 +08:00
vision d90eeb7ee4 Merge pull request #2363 from 6vision/linkai_plugin
Summary and MJ  support can be configured through LinkAI platform app plugins
2024-10-31 11:50:53 +08:00
vision 1989d53031 Merge pull request #2361 from 6vision/claude_model_update
Claude model update
2024-10-31 11:50:11 +08:00
6vision 04ef0907b4 Summary and MJ support can be configured through LinkAI platform app plugins. 2024-10-31 11:15:44 +08:00
6vision 517b43561c Merge branch 'claude_model_update' of git@github.com:6vision/chatgpt-on-wechat.git into claude_model_update 2024-10-28 00:32:46 +08:00
6vision ccb8c7227f Support setting base URL and proxy for Claude model. Also support reset command. 2024-10-28 00:32:05 +08:00
vision 9fbfeeb04f Merge branch 'zhayujie:master' into claude_model_update 2024-10-27 23:43:16 +08:00
6vision 8b753a5a1f Signed-off-by: 6vision <vision_wangpc@sina.com> 2024-10-27 21:44:06 +08:00
6vision d25cab0627 Claude model supports system prompts. 2024-10-27 21:37:58 +08:00
6vision 84da0a8a35 feat:update claude-35-sonnet model 2024-10-24 20:57:03 +08:00
vision 6f665cffba Merge pull request #2354 from 6vision/group_patpat_note
fix: group patpat notes
2024-10-24 19:53:18 +08:00
6vision aea8ac2e97 Signed-off-by: 6vision <vision_wangpc@sina.com> 2024-10-24 19:48:50 +08:00
vision 8418fa7b45 Merge pull request #2344 from 6vision/markdown_format_display
Optimize markdown format display
2024-10-21 10:27:03 +08:00
6vision 9cc4d0ee07 Optimize markdown format display 2024-10-21 10:23:39 +08:00
Saboteur7 da60831c44 fix: fixed the version of qrcode dependency 2024-10-19 16:14:49 +08:00
Saboteur7 0773174a20 Merge branch 'master' of github.com:zhayujie/chatgpt-on-wechat 2024-10-19 15:55:04 +08:00
Saboteur7 70e007d8ca fix: try to solve the unresponsiveness problem 2024-10-19 15:49:57 +08:00
vision fcc4d02c2f Merge pull request #2339 from 6vision/master
Optimize Gemini model character statistics
2024-10-14 12:19:27 +08:00
vision f4a5f00593 Merge branch 'zhayujie:master' into master 2024-10-14 12:18:33 +08:00
6vision 1170ed6566 Optimize Gemini model character statistics 2024-10-14 12:17:10 +08:00
zhayujie 883f0d449b Merge pull request #2317 from 6vision/master
feat: add install.sh and run.sh
2024-09-26 16:43:56 +08:00
6vision f4c62e7844 update install.sh url 2024-09-26 16:43:12 +08:00
6vision f0d212a9d2 Merge branch 'master' of github.com:6vision/chatgpt-on-wechat 2024-09-26 16:02:19 +08:00
6vision 76a8974034 update run.sh 2024-09-26 16:01:44 +08:00
vision 0614e822f4 Merge branch 'zhayujie:master' into master 2024-09-26 13:07:45 +08:00
vision 6f682c9a2e Merge pull request #2311 from cmgzn/master
fix: gemini doesn't receive system messages...
2024-09-26 13:04:47 +08:00
6vision a9fdbc31c5 update date 2024-09-26 13:02:38 +08:00
cmgzn 086fdb5856 fix gemini logger 2024-09-26 02:49:52 +01:00
6vision 63c8ef4f17 feat: install.sh and run.sh 2024-09-26 00:34:52 +08:00
zhayujie 736f6523c7 Merge branch 'master' into master 2024-09-25 23:11:13 +08:00
vision 8b0b360d25 Merge pull request #2288 from KuroIVeko/patch-3
Support more models from Zhipu AI
2024-09-25 22:28:16 +08:00
vision 80b84e2ee6 Merge pull request #2277 from KuroIVeko/patch-1
Lower Gemini's safety thresholds
2024-09-25 22:24:20 +08:00
vision b5b7d86f7b Merge pull request #2278 from 6vision/moonshoot
fix: "model":"mooshoot", which defaults to "moonshot-v1-32k".
2024-09-25 22:10:40 +08:00
cmgzn f20d704390 fix: gemini doesn't receive system messages; change session to gpt method, add system messages as user messages to the gemini, and logging historical messages 2024-09-20 09:10:21 +01:00
vision e4e1e2e944 Merge pull request #2306 from 6vision/master
fix: Linkai voice configuration
2024-09-18 19:43:41 +08:00
vision 6bc7eeb4cc Merge branch 'zhayujie:master' into master 2024-09-18 19:41:23 +08:00
6vision 656ed5de7b fix: LinkAI voice onfiguration 2024-09-18 19:40:51 +08:00
zhayujie a11d695c78 Merge pull request #2300 from 6vision/master
feat: support o1-preview and o1-mini model
2024-09-13 10:50:04 +08:00
6vision c4f9acd5c5 update 2024-09-13 10:48:51 +08:00
6vision 5ef929dc42 o1 model support #model 2024-09-13 10:21:38 +08:00
6vision c8cf27b544 feat: support o1-preview and o1-mini model 2024-09-13 10:13:23 +08:00
vision bb5ecfc398 Merge pull request #2298 from 6vision/error_print_ascii_windows
Handle ASCII QR code print error on Windows
2024-09-11 22:35:30 +08:00
6vision c91e7c35bb Remove unused imports 2024-09-11 22:34:33 +08:00
6vision 532d56df2d Handle ASCII QR code print error on Windows 2024-09-11 22:30:25 +08:00
KurolVeko 111ad44029 Update const.py 2024-09-05 11:07:06 +08:00
KurolVeko 6b02bae957 Update bridge.py 2024-09-05 10:59:57 +08:00
vision 6831743416 Merge pull request #2286 from 6vision/gpt
feat: support gpt-4o-2024-08-06 model
2024-09-04 18:44:08 +08:00
6vision 63e2f42636 feat: support gpt-4o-2024-08-06 model 2024-09-04 18:39:29 +08:00
6vision f6e6805453 fix: "model":"mooshoot", which defaults to "moonshot-v1-32k". 2024-08-31 16:09:10 +08:00
KurolVeko ad77ad8f2b Lower Gemini's safety thresholds
Gemini's default safety thresholds are set too high, resulting in frequent censorship of generated text. I have lowered the thresholds for all four safety categories according to Google's documentation.
2024-08-30 17:00:51 +08:00
33 changed files with 888 additions and 113 deletions
+13 -4
View File
@@ -40,13 +40,16 @@ DEMO视频:https://cdn.link-ai.tech/doc/cow_demo.mp4
**企业服务和产品咨询** 可联系产品顾问:
<img width="160" src="https://img-1317903499.cos.ap-guangzhou.myqcloud.com/docs/github-product-consult.png">
<img width="160" src="https://cdn.link-ai.tech/consultant-s.jpg">
<br>
# 🏷 更新日志
>**2024.10.31** [1.7.3版本](https://github.com/zhayujie/chatgpt-on-wechat/releases/tag/1.7.3) 程序稳定性提升、数据库功能、Claude模型优化、linkai插件优化、离线通知
>**2024.08.02** [1.7.0版本](https://github.com/zhayujie/chatgpt-on-wechat/releases/tag/1.6.9) 新增 讯飞4.0 模型、知识库引用来源展示、相关插件优化
>**2024.09.26** [1.7.2版本](https://github.com/zhayujie/chatgpt-on-wechat/releases/tag/1.7.2) 和 [1.7.1版本](https://github.com/zhayujie/chatgpt-on-wechat/releases/tag/1.7.1) 文心,讯飞等模型优化、o1 模型、快速安装和管理脚本
>**2024.08.02** [1.7.0版本](https://github.com/zhayujie/chatgpt-on-wechat/releases/tag/1.7.0) 新增 讯飞4.0 模型、知识库引用来源展示、相关插件优化
>**2024.07.19** [1.6.9版本](https://github.com/zhayujie/chatgpt-on-wechat/releases/tag/1.6.9) 新增 gpt-4o-mini 模型、阿里语音识别、企微应用渠道路由优化
@@ -80,8 +83,13 @@ DEMO视频:https://cdn.link-ai.tech/doc/cow_demo.mp4
# 🚀 快速开始
快速开始详细文档:[项目搭建文档](https://docs.link-ai.tech/cow/quick-start)
- 快速开始详细文档:[项目搭建文档](https://docs.link-ai.tech/cow/quick-start)
- 快速安装脚本,详细使用指导:[一键安装启动脚本](https://github.com/zhayujie/chatgpt-on-wechat/wiki/%E4%B8%80%E9%94%AE%E5%AE%89%E8%A3%85%E5%90%AF%E5%8A%A8%E8%84%9A%E6%9C%AC)
```bash
bash <(curl -sS https://cdn.link-ai.tech/code/cow/install.sh)
```
- 项目管理脚本,详细使用指导:[项目管理脚本](https://github.com/zhayujie/chatgpt-on-wechat/wiki/%E9%A1%B9%E7%9B%AE%E7%AE%A1%E7%90%86%E8%84%9A%E6%9C%AC)
## 一、准备
### 1. 账号注册
@@ -136,6 +144,7 @@ pip3 install -r requirements-optional.txt
{
"model": "gpt-3.5-turbo", # 模型名称, 支持 gpt-3.5-turbo, gpt-4, gpt-4-turbo, wenxin, xunfei, glm-4, claude-3-haiku, moonshot
"open_ai_api_key": "YOUR API KEY", # 如果使用openAI模型则填入上面创建的 OpenAI API KEY
"open_ai_api_base": "https://api.openai.com/v1", # OpenAI接口代理地址
"proxy": "", # 代理客户端的ip和端口,国内环境开启代理的需要填写该项,如 "127.0.0.1:7890"
"single_chat_prefix": ["bot", "@bot"], # 私聊时文本需要包含该前缀才能触发机器人回复
"single_chat_reply_prefix": "[bot] ", # 私聊时自动回复的前缀,用于区分真人
@@ -267,7 +276,7 @@ sudo docker logs -f chatgpt-on-wechat
volumes:
- ./config.json:/app/plugins/config.json
```
**注**:采用docker方式部署的详细教程可以参考:[docker部署CoW项目](https://www.wangpc.cc/ai/docker-deploy-cow/)
### 4. Railway部署
> Railway 每月提供5刀和最多500小时的免费额度。 (07.11更新: 目前大部分账号已无法免费部署)
+1 -1
View File
@@ -27,7 +27,7 @@ def sigterm_handler_wrap(_signo):
def start_channel(channel_name: str):
channel = channel_factory.create_channel(channel_name)
if channel_name in ["wx", "wxy", "terminal", "wechatmp", "wechatmp_service", "wechatcom_app", "wework",
if channel_name in ["wx", "wxy", "terminal", "wechatmp","web", "wechatmp_service", "wechatcom_app", "wework",
const.FEISHU, const.DINGTALK]:
PluginManager().load_plugins()
+12 -4
View File
@@ -5,7 +5,7 @@ import time
import openai
import openai.error
import requests
from common import const
from bot.bot import Bot
from bot.chatgpt.chat_gpt_session import ChatGPTSession
from bot.openai.open_ai_image import OpenAIImage
@@ -15,7 +15,7 @@ from bridge.reply import Reply, ReplyType
from common.log import logger
from common.token_bucket import TokenBucket
from config import conf, load_config
from bot.baidu.baidu_wenxin_session import BaiduWenxinSession
# OpenAI对话模型API (可用)
class ChatGPTBot(Bot, OpenAIImage):
@@ -30,10 +30,12 @@ class ChatGPTBot(Bot, OpenAIImage):
openai.proxy = proxy
if conf().get("rate_limit_chatgpt"):
self.tb4chatgpt = TokenBucket(conf().get("rate_limit_chatgpt", 20))
conf_model = conf().get("model") or "gpt-3.5-turbo"
self.sessions = SessionManager(ChatGPTSession, model=conf().get("model") or "gpt-3.5-turbo")
# o1相关模型不支持system prompt,暂时用文心模型的session
self.args = {
"model": conf().get("model") or "gpt-3.5-turbo", # 对话模型的名称
"model": conf_model, # 对话模型的名称
"temperature": conf().get("temperature", 0.9), # 值在[0,1]之间,越大表示回复越具有不确定性
# "max_tokens":4096, # 回复最大的字符数
"top_p": conf().get("top_p", 1),
@@ -42,6 +44,12 @@ class ChatGPTBot(Bot, OpenAIImage):
"request_timeout": conf().get("request_timeout", None), # 请求超时时间,openai接口默认设置为600,对于难问题一般需要较长时间
"timeout": conf().get("request_timeout", None), # 重试超时时间,在这个时间内,将会自动重试
}
# o1相关模型固定了部分参数,暂时去掉
if conf_model in [const.O1, const.O1_MINI]:
self.sessions = SessionManager(BaiduWenxinSession, model=conf().get("model") or const.O1_MINI)
remove_keys = ["temperature", "top_p", "frequency_penalty", "presence_penalty"]
for key in remove_keys:
self.args.pop(key, None) # 如果键不存在,使用 None 来避免抛出错误
def reply(self, query, context=None):
# acquire reply content
+2 -2
View File
@@ -57,7 +57,7 @@ class ChatGPTSession(Session):
def num_tokens_from_messages(messages, model):
"""Returns the number of tokens used by a list of messages."""
if model in ["wenxin", "xunfei", const.GEMINI]:
if model in ["wenxin", "xunfei"] or model.startswith(const.GEMINI):
return num_tokens_by_character(messages)
import tiktoken
@@ -67,7 +67,7 @@ def num_tokens_from_messages(messages, model):
elif model in ["gpt-4-0314", "gpt-4-0613", "gpt-4-32k", "gpt-4-32k-0613", "gpt-3.5-turbo-0613",
"gpt-3.5-turbo-16k", "gpt-3.5-turbo-16k-0613", "gpt-35-turbo-16k", "gpt-4-turbo-preview",
"gpt-4-1106-preview", const.GPT4_TURBO_PREVIEW, const.GPT4_VISION_PREVIEW, const.GPT4_TURBO_01_25,
const.GPT_4o, const.GPT_4o_MINI, const.LINKAI_4o, const.LINKAI_4_TURBO]:
const.GPT_4o, const.GPT_4O_0806, const.GPT_4o_MINI, const.LINKAI_4o, const.LINKAI_4_TURBO]:
return num_tokens_from_messages(messages, model="gpt-4")
elif model.startswith("claude-3"):
return num_tokens_from_messages(messages, model="gpt-3.5-turbo")
+17 -20
View File
@@ -8,12 +8,12 @@ import anthropic
from bot.bot import Bot
from bot.openai.open_ai_image import OpenAIImage
from bot.chatgpt.chat_gpt_session import ChatGPTSession
from bot.gemini.google_gemini_bot import GoogleGeminiBot
from bot.baidu.baidu_wenxin_session import BaiduWenxinSession
from bot.session_manager import SessionManager
from bridge.context import ContextType
from bridge.reply import Reply, ReplyType
from common.log import logger
from common import const
from config import conf
user_session = dict()
@@ -23,17 +23,14 @@ user_session = dict()
class ClaudeAPIBot(Bot, OpenAIImage):
def __init__(self):
super().__init__()
proxy = conf().get("proxy", None)
base_url = conf().get("open_ai_api_base", None) # 复用"open_ai_api_base"参数作为base_url
self.claudeClient = anthropic.Anthropic(
api_key=conf().get("claude_api_key")
api_key=conf().get("claude_api_key"),
proxies=proxy if proxy else None,
base_url=base_url if base_url else None
)
openai.api_key = conf().get("open_ai_api_key")
if conf().get("open_ai_api_base"):
openai.api_base = conf().get("open_ai_api_base")
proxy = conf().get("proxy")
if proxy:
openai.proxy = proxy
self.sessions = SessionManager(ChatGPTSession, model=conf().get("model") or "text-davinci-003")
self.sessions = SessionManager(BaiduWenxinSession, model=conf().get("model") or "text-davinci-003")
def reply(self, query, context=None):
# acquire reply content
@@ -76,14 +73,14 @@ class ClaudeAPIBot(Bot, OpenAIImage):
reply = Reply(ReplyType.ERROR, retstring)
return reply
def reply_text(self, session: ChatGPTSession, retry_count=0):
def reply_text(self, session: BaiduWenxinSession, retry_count=0):
try:
actual_model = self._model_mapping(conf().get("model"))
response = self.claudeClient.messages.create(
model=actual_model,
max_tokens=1024,
# system=conf().get("system"),
messages=GoogleGeminiBot.filter_messages(session.messages)
max_tokens=4096,
system=conf().get("character_desc", ""),
messages=session.messages
)
# response = openai.Completion.create(prompt=str(session), **self.args)
res_content = response.content[0].text.strip().replace("<|endoftext|>", "")
@@ -97,7 +94,7 @@ class ClaudeAPIBot(Bot, OpenAIImage):
}
except Exception as e:
need_retry = retry_count < 2
result = {"completion_tokens": 0, "content": "我现在有点累了,等会再来吧"}
result = {"total_tokens": 0, "completion_tokens": 0, "content": "我现在有点累了,等会再来吧"}
if isinstance(e, openai.error.RateLimitError):
logger.warn("[CLAUDE_API] RateLimitError: {}".format(e))
result["content"] = "提问太快啦,请休息一下再问我吧"
@@ -125,11 +122,11 @@ class ClaudeAPIBot(Bot, OpenAIImage):
def _model_mapping(self, model) -> str:
if model == "claude-3-opus":
return "claude-3-opus-20240229"
return const.CLAUDE_3_OPUS
elif model == "claude-3-sonnet":
return "claude-3-sonnet-20240229"
return const.CLAUDE_3_SONNET
elif model == "claude-3-haiku":
return "claude-3-haiku-20240307"
return const.CLAUDE_3_HAIKU
elif model == "claude-3.5-sonnet":
return "claude-3-5-sonnet-20240620"
return const.CLAUDE_35_SONNET
return model
+46 -12
View File
@@ -13,7 +13,9 @@ from bridge.context import ContextType, Context
from bridge.reply import Reply, ReplyType
from common.log import logger
from config import conf
from bot.chatgpt.chat_gpt_session import ChatGPTSession
from bot.baidu.baidu_wenxin_session import BaiduWenxinSession
from google.generativeai.types import HarmCategory, HarmBlockThreshold
# OpenAI对话模型API (可用)
@@ -22,8 +24,8 @@ class GoogleGeminiBot(Bot):
def __init__(self):
super().__init__()
self.api_key = conf().get("gemini_api_key")
# 复用文心的token计算方式
self.sessions = SessionManager(BaiduWenxinSession, model=conf().get("model") or "gpt-3.5-turbo")
# 复用chatGPT的token计算方式
self.sessions = SessionManager(ChatGPTSession, model=conf().get("model") or "gpt-3.5-turbo")
self.model = conf().get("model") or "gemini-pro"
if self.model == "gemini":
self.model = "gemini-pro"
@@ -36,18 +38,44 @@ class GoogleGeminiBot(Bot):
session_id = context["session_id"]
session = self.sessions.session_query(query, session_id)
gemini_messages = self._convert_to_gemini_messages(self.filter_messages(session.messages))
logger.debug(f"[Gemini] messages={gemini_messages}")
genai.configure(api_key=self.api_key)
model = genai.GenerativeModel(self.model)
response = model.generate_content(gemini_messages)
reply_text = response.text
self.sessions.session_reply(reply_text, session_id)
logger.info(f"[Gemini] reply={reply_text}")
return Reply(ReplyType.TEXT, reply_text)
# 添加安全设置
safety_settings = {
HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_NONE,
HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_NONE,
HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: HarmBlockThreshold.BLOCK_NONE,
HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_NONE,
}
# 生成回复,包含安全设置
response = model.generate_content(
gemini_messages,
safety_settings=safety_settings
)
if response.candidates and response.candidates[0].content:
reply_text = response.candidates[0].content.parts[0].text
logger.info(f"[Gemini] reply={reply_text}")
self.sessions.session_reply(reply_text, session_id)
return Reply(ReplyType.TEXT, reply_text)
else:
# 没有有效响应内容,可能内容被屏蔽,输出安全评分
logger.warning("[Gemini] No valid response generated. Checking safety ratings.")
if hasattr(response, 'candidates') and response.candidates:
for rating in response.candidates[0].safety_ratings:
logger.warning(f"Safety rating: {rating.category} - {rating.probability}")
error_message = "No valid response generated due to safety constraints."
self.sessions.session_reply(error_message, session_id)
return Reply(ReplyType.ERROR, error_message)
except Exception as e:
logger.error("[Gemini] fetch reply error, may contain unsafe content")
logger.error(e)
return Reply(ReplyType.ERROR, "invoke [Gemini] api failed!")
logger.error(f"[Gemini] Error generating response: {str(e)}", exc_info=True)
error_message = "Failed to invoke [Gemini] api!"
self.sessions.session_reply(error_message, session_id)
return Reply(ReplyType.ERROR, error_message)
def _convert_to_gemini_messages(self, messages: list):
res = []
for msg in messages:
@@ -55,6 +83,8 @@ class GoogleGeminiBot(Bot):
role = "user"
elif msg.get("role") == "assistant":
role = "model"
elif msg.get("role") == "system":
role = "user"
else:
continue
res.append({
@@ -71,7 +101,11 @@ class GoogleGeminiBot(Bot):
return res
for i in range(len(messages) - 1, -1, -1):
message = messages[i]
if message.get("role") != turn:
role = message.get("role")
if role == "system":
res.insert(0, message)
continue
if role != turn:
continue
res.insert(0, message)
if turn == "user":
+4 -1
View File
@@ -19,8 +19,11 @@ class MoonshotBot(Bot):
def __init__(self):
super().__init__()
self.sessions = SessionManager(MoonshotSession, model=conf().get("model") or "moonshot-v1-128k")
model = conf().get("model") or "moonshot-v1-128k"
if model == "moonshot":
model = "moonshot-v1-32k"
self.args = {
"model": conf().get("model") or "moonshot-v1-128k", # 对话模型的名称
"model": model, # 对话模型的名称
"temperature": conf().get("temperature", 0.3), # 如果设置,值域须为 [0, 1] 我们推荐 0.3,以达到较合适的效果。
"top_p": conf().get("top_p", 1.0), # 使用默认值
}
+2 -2
View File
@@ -38,7 +38,7 @@ class Bridge(object):
self.btype["chat"] = const.QWEN_DASHSCOPE
if model_type and model_type.startswith("gemini"):
self.btype["chat"] = const.GEMINI
if model_type in [const.ZHIPU_AI]:
if model_type and model_type.startswith("glm"):
self.btype["chat"] = const.ZHIPU_AI
if model_type and model_type.startswith("claude-3"):
self.btype["chat"] = const.CLAUDEAPI
@@ -46,7 +46,7 @@ class Bridge(object):
if model_type in ["claude"]:
self.btype["chat"] = const.CLAUDEAI
if model_type in ["moonshot-v1-8k", "moonshot-v1-32k", "moonshot-v1-128k"]:
if model_type in [const.MOONSHOT, "moonshot-v1-8k", "moonshot-v1-32k", "moonshot-v1-128k"]:
self.btype["chat"] = const.MOONSHOT
if model_type in ["abab6.5-chat"]:
+3
View File
@@ -21,6 +21,9 @@ def create_channel(channel_type) -> Channel:
elif channel_type == "terminal":
from channel.terminal.terminal_channel import TerminalChannel
ch = TerminalChannel()
elif channel_type == 'web':
from channel.web.web_channel import WebChannel
ch = WebChannel()
elif channel_type == "wechatmp":
from channel.wechatmp.wechatmp_channel import WechatMPChannel
ch = WechatMPChannel(passive_reply=True)
+14 -11
View File
@@ -337,24 +337,27 @@ class ChatChannel(Channel):
while True:
with self.lock:
session_ids = list(self.sessions.keys())
for session_id in session_ids:
for session_id in session_ids:
with self.lock:
context_queue, semaphore = self.sessions[session_id]
if semaphore.acquire(blocking=False): # 等线程处理完毕才能删除
if not context_queue.empty():
context = context_queue.get()
logger.debug("[chat_channel] consume context: {}".format(context))
future: Future = handler_pool.submit(self._handle, context)
future.add_done_callback(self._thread_pool_callback(session_id, context=context))
if semaphore.acquire(blocking=False): # 等线程处理完毕才能删除
if not context_queue.empty():
context = context_queue.get()
logger.debug("[chat_channel] consume context: {}".format(context))
future: Future = handler_pool.submit(self._handle, context)
future.add_done_callback(self._thread_pool_callback(session_id, context=context))
with self.lock:
if session_id not in self.futures:
self.futures[session_id] = []
self.futures[session_id].append(future)
elif semaphore._initial_value == semaphore._value + 1: # 除了当前,没有任务再申请到信号量,说明所有任务都处理完毕
elif semaphore._initial_value == semaphore._value + 1: # 除了当前,没有任务再申请到信号量,说明所有任务都处理完毕
with self.lock:
self.futures[session_id] = [t for t in self.futures[session_id] if not t.done()]
assert len(self.futures[session_id]) == 0, "thread pool error"
del self.sessions[session_id]
else:
semaphore.release()
time.sleep(0.1)
else:
semaphore.release()
time.sleep(0.2)
# 取消session_id对应的所有任务,只能取消排队的消息和已提交线程池但未执行的任务
def cancel_session(self, session_id):
+7
View File
@@ -0,0 +1,7 @@
# Web channel
使用SSEServer-Sent Events,服务器推送事件)实现,提供了一个默认的网页。也可以自己实现加入api
#使用方法
- 在配置文件中channel_type填入web即可
- 访问地址 http://localhost:9899
- port可以在配置项 web_port中设置
+165
View File
@@ -0,0 +1,165 @@
<!DOCTYPE html>
<html lang="zh">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Chat</title>
<style>
body {
font-family: Arial, sans-serif;
display: flex;
flex-direction: column;
height: 100vh; /* 占据所有高度 */
margin: 0;
/* background-color: #f8f9fa; */
}
#chat-container {
display: flex;
flex-direction: column;
width: 100%;
max-width: 500px;
margin: auto;
border: 1px solid #ccc;
border-radius: 5px;
overflow: hidden;
box-shadow: 0 0 10px rgba(0, 0, 0, 0.1);
flex: 1; /* 使聊天容器占据剩余空间 */
}
#messages {
flex-direction: column;
display: flex;
flex: 1;
overflow-y: auto;
padding: 10px;
overflow-y: auto;
border-bottom: 1px solid #ccc;
background-color: #ffffff;
}
.message {
margin: 5px 0; /* 间隔 */
padding: 10px 15px; /* 内边距 */
border-radius: 15px; /* 圆角 */
max-width: 80%; /* 限制最大宽度 */
min-width: 80px; /* 设置最小宽度 */
min-height: 40px; /* 设置最小高度 */
word-wrap: break-word; /* 自动换行 */
position: relative; /* 时间戳定位 */
display: inline-block; /* 内容自适应宽度 */
box-sizing: border-box; /* 包括内边距和边框 */
flex-shrink: 0; /* 禁止高度被压缩 */
word-wrap: break-word; /* 自动换行,防止单行过长 */
white-space: normal; /* 允许正常换行 */
overflow: hidden;
}
.bot {
background-color: #f1f1f1; /* 灰色背景 */
color: black; /* 黑色字体 */
align-self: flex-start; /* 左对齐 */
margin-right: auto; /* 确保消息靠左 */
text-align: left; /* 内容左对齐 */
}
.user {
background-color: #2bc840; /* 蓝色背景 */
align-self: flex-end; /* 右对齐 */
margin-left: auto; /* 确保消息靠右 */
text-align: left; /* 内容左对齐 */
}
.timestamp {
font-size: 0.8em; /* 时间戳字体大小 */
color: rgba(0, 0, 0, 0.5); /* 半透明黑色 */
margin-bottom: 5px; /* 时间戳下方间距 */
display: block; /* 时间戳独占一行 */
}
#input-container {
display: flex;
padding: 10px;
background-color: #ffffff;
border-top: 1px solid #ccc;
}
#input {
flex: 1;
padding: 10px;
border: 1px solid #ccc;
border-radius: 5px;
margin-right: 10px;
}
#send {
padding: 10px;
border: none;
background-color: #007bff;
color: white;
border-radius: 5px;
cursor: pointer;
}
#send:hover {
background-color: #0056b3;
}
</style>
</head>
<body>
<div id="chat-container">
<div id="messages"></div>
<div id="input-container">
<input type="text" id="input" placeholder="输入消息..." />
<button id="send">发送</button>
</div>
</div>
<script>
const messagesDiv = document.getElementById('messages');
const input = document.getElementById('input');
const sendButton = document.getElementById('send');
// 生成唯一的 user_id
const userId = 'user_' + Math.random().toString(36).substr(2, 9);
// 连接 SSE
const eventSource = new EventSource(`/sse/${userId}`);
eventSource.onmessage = function(event) {
const message = JSON.parse(event.data);
const messageDiv = document.createElement('div');
messageDiv.className = 'message bot';
const timestamp = new Date(message.timestamp).toLocaleTimeString(); // 假设消息中有时间戳
messageDiv.innerHTML = `<div class="timestamp">${timestamp}</div>${message.content}`; // 显示时间
messagesDiv.appendChild(messageDiv);
messagesDiv.scrollTop = messagesDiv.scrollHeight; // 滚动到底部
};
sendButton.onclick = function() {
sendMessage();
};
input.addEventListener('keypress', function(event) {
if (event.key === 'Enter') {
sendMessage();
event.preventDefault(); // 防止换行
}
});
function sendMessage() {
const userMessage = input.value;
if (userMessage) {
const timestamp = new Date().toISOString(); // 获取当前时间戳
fetch('/message', {
method: 'POST',
headers: {
'Content-Type': 'application/json'
},
body: JSON.stringify({ user_id: userId, message: userMessage, timestamp: timestamp }) // 发送时间戳
});
const messageDiv = document.createElement('div');
messageDiv.className = 'message user';
const userTimestamp = new Date().toLocaleTimeString(); // 获取当前时间
messageDiv.innerHTML = `<div class="timestamp">${userTimestamp}</div>${userMessage}`; // 显示时间
messagesDiv.appendChild(messageDiv);
messagesDiv.scrollTop = messagesDiv.scrollHeight; // 滚动到底部
input.value = ''; // 清空输入框
}
}
</script>
</body>
</html>
+204
View File
@@ -0,0 +1,204 @@
import sys
import time
import web
import json
from queue import Queue
from bridge.context import *
from bridge.reply import Reply, ReplyType
from channel.chat_channel import ChatChannel, check_prefix
from channel.chat_message import ChatMessage
from common.log import logger
from common.singleton import singleton
from config import conf
import os
class WebMessage(ChatMessage):
def __init__(
self,
msg_id,
content,
ctype=ContextType.TEXT,
from_user_id="User",
to_user_id="Chatgpt",
other_user_id="Chatgpt",
):
self.msg_id = msg_id
self.ctype = ctype
self.content = content
self.from_user_id = from_user_id
self.to_user_id = to_user_id
self.other_user_id = other_user_id
@singleton
class WebChannel(ChatChannel):
NOT_SUPPORT_REPLYTYPE = [ReplyType.VOICE]
_instance = None
# def __new__(cls):
# if cls._instance is None:
# cls._instance = super(WebChannel, cls).__new__(cls)
# return cls._instance
def __init__(self):
super().__init__()
self.message_queues = {} # 为每个用户存储一个消息队列
self.msg_id_counter = 0 # 添加消息ID计数器
def _generate_msg_id(self):
"""生成唯一的消息ID"""
self.msg_id_counter += 1
return str(int(time.time())) + str(self.msg_id_counter)
def send(self, reply: Reply, context: Context):
try:
if reply.type == ReplyType.IMAGE:
from PIL import Image
image_storage = reply.content
image_storage.seek(0)
img = Image.open(image_storage)
print("<IMAGE>")
img.show()
elif reply.type == ReplyType.IMAGE_URL:
import io
import requests
from PIL import Image
img_url = reply.content
pic_res = requests.get(img_url, stream=True)
image_storage = io.BytesIO()
for block in pic_res.iter_content(1024):
image_storage.write(block)
image_storage.seek(0)
img = Image.open(image_storage)
print(img_url)
img.show()
else:
print(reply.content)
# 获取用户ID,如果没有则使用默认值
# user_id = getattr(context.get("session", None), "session_id", "default_user")
user_id = context["receiver"]
# 确保用户有对应的消息队列
if user_id not in self.message_queues:
self.message_queues[user_id] = Queue()
# 将消息放入对应用户的队列
message_data = {
"type": str(reply.type),
"content": reply.content,
"timestamp": time.time()
}
self.message_queues[user_id].put(message_data)
logger.debug(f"Message queued for user {user_id}")
except Exception as e:
logger.error(f"Error in send method: {e}")
raise
def sse_handler(self, user_id):
"""
Handle Server-Sent Events (SSE) for real-time communication.
"""
web.header('Content-Type', 'text/event-stream')
web.header('Cache-Control', 'no-cache')
web.header('Connection', 'keep-alive')
# 确保用户有消息队列
if user_id not in self.message_queues:
self.message_queues[user_id] = Queue()
try:
while True:
try:
# 发送心跳
yield f": heartbeat\n\n"
# 非阻塞方式获取消息
if not self.message_queues[user_id].empty():
message = self.message_queues[user_id].get_nowait()
yield f"data: {json.dumps(message)}\n\n"
time.sleep(0.5)
except Exception as e:
logger.error(f"SSE Error: {e}")
break
finally:
# 清理资源
if user_id in self.message_queues:
# 只有当队列为空时才删除
if self.message_queues[user_id].empty():
del self.message_queues[user_id]
def post_message(self):
"""
Handle incoming messages from users via POST request.
"""
try:
data = web.data() # 获取原始POST数据
json_data = json.loads(data)
user_id = json_data.get('user_id', 'default_user')
prompt = json_data.get('message', '')
except json.JSONDecodeError:
return json.dumps({"status": "error", "message": "Invalid JSON"})
except Exception as e:
return json.dumps({"status": "error", "message": str(e)})
if not prompt:
return json.dumps({"status": "error", "message": "No message provided"})
try:
msg_id = self._generate_msg_id()
context = self._compose_context(ContextType.TEXT, prompt, msg=WebMessage(msg_id,
prompt,
from_user_id=user_id,
other_user_id = user_id
))
context["isgroup"] = False
# context["session"] = web.storage(session_id=user_id)
if not context:
return json.dumps({"status": "error", "message": "Failed to process message"})
self.produce(context)
return json.dumps({"status": "success", "message": "Message received"})
except Exception as e:
logger.error(f"Error processing message: {e}")
return json.dumps({"status": "error", "message": "Internal server error"})
def chat_page(self):
"""Serve the chat HTML page."""
file_path = os.path.join(os.path.dirname(__file__), 'chat.html') # 使用绝对路径
with open(file_path, 'r', encoding='utf-8') as f:
return f.read()
def startup(self):
    """Start the web.py HTTP server hosting the SSE/message/chat routes."""
    # Quiet routine logging; only warnings and above reach the console.
    logger.setLevel("WARN")
    print("\nWeb Channel is running. Send POST requests to /message to send messages.")
    # Route table: (url pattern, handler-class name resolved via globals()).
    url_mapping = (
        '/sse/(.+)', 'SSEHandler',      # SSE stream keyed by user id
        '/message', 'MessageHandler',   # inbound user messages
        '/chat', 'ChatHandler',         # static chat page
    )
    listen_port = conf().get("web_port", 9899)
    application = web.application(url_mapping, globals(), autoreload=False)
    web.httpserver.runsimple(application.wsgifunc(), ("0.0.0.0", listen_port))
class SSEHandler:
    """web.py handler for GET /sse/<user_id>: streams server-sent events."""
    def GET(self, user_id):
        # NOTE(review): assumes WebChannel() returns a shared singleton so this
        # request sees the same message_queues as post_message — confirm.
        return WebChannel().sse_handler(user_id)
class MessageHandler:
    """web.py handler for POST /message: accepts a user's chat message."""
    def POST(self):
        # NOTE(review): assumes WebChannel() is a singleton instance — confirm.
        return WebChannel().post_message()
class ChatHandler:
    """web.py handler for GET /chat: serves the static chat page."""
    def GET(self):
        return WebChannel().chat_page()
+7 -2
View File
@@ -20,7 +20,7 @@ from common.expired_dict import ExpiredDict
from common.log import logger
from common.singleton import singleton
from common.time_check import time_checker
from common.utils import convert_webp_to_png
from common.utils import convert_webp_to_png, remove_markdown_symbol
from config import conf, get_appdata_dir
from lib import itchat
from lib.itchat.content import *
@@ -100,7 +100,10 @@ def qrCallback(uuid, status, qrcode):
qr = qrcode.QRCode(border=1)
qr.add_data(url)
qr.make(fit=True)
qr.print_ascii(invert=True)
try:
qr.print_ascii(invert=True)
except UnicodeEncodeError:
print("ASCII QR code printing failed due to encoding issues.")
@singleton
@@ -210,9 +213,11 @@ class WechatChannel(ChatChannel):
def send(self, reply: Reply, context: Context):
receiver = context["receiver"]
if reply.type == ReplyType.TEXT:
reply.content = remove_markdown_symbol(reply.content)
itchat.send(reply.content, toUserName=receiver)
logger.info("[WX] sendMsg={}, receiver={}".format(reply, receiver))
elif reply.type == ReplyType.ERROR or reply.type == ReplyType.INFO:
reply.content = remove_markdown_symbol(reply.content)
itchat.send(reply.content, toUserName=receiver)
logger.info("[WX] sendMsg={}, receiver={}".format(reply, receiver))
elif reply.type == ReplyType.VOICE:
+10 -5
View File
@@ -55,6 +55,16 @@ class WechatMessage(ChatMessage):
self.ctype = ContextType.EXIT_GROUP
self.content = itchat_msg["Content"]
self.actual_user_nickname = re.findall(r"\"(.*?)\"", itchat_msg["Content"])[0]
elif any(note_patpat in itchat_msg["Content"] for note_patpat in notes_patpat): # 若有任何在notes_patpat列表中的字符串出现在NOTE中:
self.ctype = ContextType.PATPAT
self.content = itchat_msg["Content"]
if "拍了拍我" in itchat_msg["Content"]: # 识别中文
self.actual_user_nickname = re.findall(r"\"(.*?)\"", itchat_msg["Content"])[0]
elif "tickled my" in itchat_msg["Content"] or "tickled me" in itchat_msg["Content"]:
self.actual_user_nickname = re.findall(r'^(.*?)(?:tickled my|tickled me)', itchat_msg["Content"])[0]
else:
raise NotImplementedError("Unsupported note message: " + itchat_msg["Content"])
elif "你已添加了" in itchat_msg["Content"]: #通过好友请求
self.ctype = ContextType.ACCEPT_FRIEND
@@ -62,11 +72,6 @@ class WechatMessage(ChatMessage):
elif any(note_patpat in itchat_msg["Content"] for note_patpat in notes_patpat): # 若有任何在notes_patpat列表中的字符串出现在NOTE中:
self.ctype = ContextType.PATPAT
self.content = itchat_msg["Content"]
if is_group:
if "拍了拍我" in itchat_msg["Content"]: # 识别中文
self.actual_user_nickname = re.findall(r"\"(.*?)\"", itchat_msg["Content"])[0]
elif ("tickled my" in itchat_msg["Content"] or "tickled me" in itchat_msg["Content"]):
self.actual_user_nickname = re.findall(r'^(.*?)(?:tickled my|tickled me)', itchat_msg["Content"])[0]
else:
raise NotImplementedError("Unsupported note message: " + itchat_msg["Content"])
elif itchat_msg["Type"] == ATTACHMENT:
+2 -2
View File
@@ -17,7 +17,7 @@ from channel.wechatcom.wechatcomapp_client import WechatComAppClient
from channel.wechatcom.wechatcomapp_message import WechatComAppMessage
from common.log import logger
from common.singleton import singleton
from common.utils import compress_imgfile, fsize, split_string_by_utf8_length, convert_webp_to_png
from common.utils import compress_imgfile, fsize, split_string_by_utf8_length, convert_webp_to_png, remove_markdown_symbol
from config import conf, subscribe_msg
from voice.audio_convert import any_to_amr, split_audio
@@ -52,7 +52,7 @@ class WechatComAppChannel(ChatChannel):
def send(self, reply: Reply, context: Context):
receiver = context["receiver"]
if reply.type in [ReplyType.TEXT, ReplyType.ERROR, ReplyType.INFO]:
reply_text = reply.content
reply_text = remove_markdown_symbol(reply.content)
texts = split_string_by_utf8_length(reply_text, MAX_UTF8_LEN)
if len(texts) > 1:
logger.info("[wechatcom] text too long, split into {} parts".format(len(texts)))
+2 -2
View File
@@ -19,7 +19,7 @@ from channel.wechatmp.common import *
from channel.wechatmp.wechatmp_client import WechatMPClient
from common.log import logger
from common.singleton import singleton
from common.utils import split_string_by_utf8_length
from common.utils import split_string_by_utf8_length, remove_markdown_symbol
from config import conf
from voice.audio_convert import any_to_mp3, split_audio
@@ -81,7 +81,7 @@ class WechatMPChannel(ChatChannel):
receiver = context["receiver"]
if self.passive_reply:
if reply.type == ReplyType.TEXT or reply.type == ReplyType.INFO or reply.type == ReplyType.ERROR:
reply_text = reply.content
reply_text = remove_markdown_symbol(reply.content)
logger.info("[wechatmp] text cached, receiver {}\n{}".format(receiver, reply_text))
self.cache_dict[receiver].append(("text", reply_text))
elif reply.type == ReplyType.VOICE:
+32 -4
View File
@@ -24,6 +24,7 @@ GPT35_0125 = "gpt-3.5-turbo-0125"
GPT35_1106 = "gpt-3.5-turbo-1106"
GPT_4o = "gpt-4o"
GPT_4O_0806 = "gpt-4o-2024-08-06"
GPT4_TURBO = "gpt-4-turbo"
GPT4_TURBO_PREVIEW = "gpt-4-turbo-preview"
GPT4_TURBO_04_09 = "gpt-4-turbo-2024-04-09"
@@ -37,6 +38,9 @@ GPT4_32k = "gpt-4-32k"
GPT4_06_13 = "gpt-4-0613"
GPT4_32k_06_13 = "gpt-4-32k-0613"
O1 = "o1-preview"
O1_MINI = "o1-mini"
WHISPER_1 = "whisper-1"
TTS_1 = "tts-1"
TTS_1_HD = "tts-1-hd"
@@ -55,14 +59,38 @@ LINKAI_4o = "linkai-4o"
GEMINI_PRO = "gemini-1.0-pro"
GEMINI_15_flash = "gemini-1.5-flash"
GEMINI_15_PRO = "gemini-1.5-pro"
GEMINI_20_flash_exp = "gemini-2.0-flash-exp"
GLM_4 = "glm-4"
GLM_4_PLUS = "glm-4-plus"
GLM_4_flash = "glm-4-flash"
GLM_4_LONG = "glm-4-long"
GLM_4_ALLTOOLS = "glm-4-alltools"
GLM_4_0520 = "glm-4-0520"
GLM_4_AIR = "glm-4-air"
GLM_4_AIRX = "glm-4-airx"
CLAUDE_3_OPUS = "claude-3-opus-latest"
CLAUDE_3_OPUS_0229 = "claude-3-opus-20240229"
CLAUDE_35_SONNET = "claude-3-5-sonnet-latest" # 带 latest 标签的模型名称,会不断更新指向最新发布的模型
CLAUDE_35_SONNET_1022 = "claude-3-5-sonnet-20241022" # 带具体日期的模型名称,会固定为该日期发布的模型
CLAUDE_35_SONNET_0620 = "claude-3-5-sonnet-20240620"
CLAUDE_3_SONNET = "claude-3-sonnet-20240229"
CLAUDE_3_HAIKU = "claude-3-haiku-20240307"
MODEL_LIST = [
GPT35, GPT35_0125, GPT35_1106, "gpt-3.5-turbo-16k",
GPT_4o, GPT_4o_MINI, GPT4_TURBO, GPT4_TURBO_PREVIEW, GPT4_TURBO_01_25, GPT4_TURBO_11_06, GPT4, GPT4_32k, GPT4_06_13, GPT4_32k_06_13,
O1, O1_MINI, GPT_4o, GPT_4O_0806, GPT_4o_MINI, GPT4_TURBO, GPT4_TURBO_PREVIEW, GPT4_TURBO_01_25, GPT4_TURBO_11_06, GPT4, GPT4_32k, GPT4_06_13, GPT4_32k_06_13,
WEN_XIN, WEN_XIN_4,
XUNFEI, ZHIPU_AI, MOONSHOT, MiniMax,
GEMINI, GEMINI_PRO, GEMINI_15_flash, GEMINI_15_PRO,
"claude", "claude-3-haiku", "claude-3-sonnet", "claude-3-opus", "claude-3-opus-20240229", "claude-3.5-sonnet",
XUNFEI,
ZHIPU_AI, GLM_4, GLM_4_PLUS, GLM_4_flash, GLM_4_LONG, GLM_4_ALLTOOLS, GLM_4_0520, GLM_4_AIR, GLM_4_AIRX,
MOONSHOT, MiniMax,
GEMINI, GEMINI_PRO, GEMINI_15_flash, GEMINI_15_PRO,GEMINI_20_flash_exp,
CLAUDE_3_OPUS, CLAUDE_3_OPUS_0229, CLAUDE_35_SONNET, CLAUDE_35_SONNET_1022, CLAUDE_35_SONNET_0620, CLAUDE_3_SONNET, CLAUDE_3_HAIKU, "claude", "claude-3-haiku", "claude-3-sonnet", "claude-3-opus", "claude-3.5-sonnet",
"moonshot-v1-8k", "moonshot-v1-32k", "moonshot-v1-128k",
QWEN, QWEN_TURBO, QWEN_PLUS, QWEN_MAX,
LINKAI_35, LINKAI_4_TURBO, LINKAI_4o
+9 -4
View File
@@ -2,7 +2,7 @@ from bridge.context import Context, ContextType
from bridge.reply import Reply, ReplyType
from common.log import logger
from linkai import LinkAIClient, PushMsg
from config import conf, pconf, plugin_config, available_setting
from config import conf, pconf, plugin_config, available_setting, write_plugin_config
from plugins import PluginManager
import time
@@ -42,14 +42,19 @@ class ChatClient(LinkAIClient):
if reply_voice_mode:
if reply_voice_mode == "voice_reply_voice":
local_config["voice_reply_voice"] = True
local_config["always_reply_voice"] = False
elif reply_voice_mode == "always_reply_voice":
local_config["always_reply_voice"] = True
local_config["voice_reply_voice"] = True
elif reply_voice_mode == "no_reply_voice":
local_config["always_reply_voice"] = False
local_config["voice_reply_voice"] = False
if config.get("admin_password"):
if not plugin_config.get("Godcmd"):
plugin_config["Godcmd"] = {"password": config.get("admin_password"), "admin_users": []}
if not pconf("Godcmd"):
write_plugin_config({"Godcmd": {"password": config.get("admin_password"), "admin_users": []} })
else:
plugin_config["Godcmd"]["password"] = config.get("admin_password")
pconf("Godcmd")["password"] = config.get("admin_password")
PluginManager().instances["GODCMD"].reload()
if config.get("group_app_map") and pconf("linkai"):
+8
View File
@@ -1,5 +1,6 @@
import io
import os
import re
from urllib.parse import urlparse
from PIL import Image
from common.log import logger
@@ -68,3 +69,10 @@ def convert_webp_to_png(webp_image):
except Exception as e:
logger.error(f"Failed to convert WEBP to PNG: {e}")
raise
def remove_markdown_symbol(text: str):
# 移除markdown格式,目前先移除**
if not text:
return text
return re.sub(r'\*\*(.*?)\*\*', r'\1', text)
+9
View File
@@ -179,6 +179,7 @@ available_setting = {
"Minimax_api_key": "",
"Minimax_group_id": "",
"Minimax_base_url": "",
"web_port": 9899,
}
@@ -341,6 +342,14 @@ def write_plugin_config(pconf: dict):
for k in pconf:
plugin_config[k.lower()] = pconf[k]
def remove_plugin_config(name: str):
"""
移除待重新加载的插件全局配置
:param name: 待重载的插件名
"""
global plugin_config
plugin_config.pop(name.lower(), None)
def pconf(plugin_name: str) -> dict:
"""
+2 -2
View File
@@ -313,7 +313,7 @@ class Godcmd(Plugin):
except Exception as e:
ok, result = False, "你没有设置私有GPT模型"
elif cmd == "reset":
if bottype in [const.OPEN_AI, const.CHATGPT, const.CHATGPTONAZURE, const.LINKAI, const.BAIDU, const.XUNFEI, const.QWEN, const.GEMINI, const.ZHIPU_AI]:
if bottype in [const.OPEN_AI, const.CHATGPT, const.CHATGPTONAZURE, const.LINKAI, const.BAIDU, const.XUNFEI, const.QWEN, const.GEMINI, const.ZHIPU_AI, const.CLAUDEAPI]:
bot.sessions.clear_session(session_id)
if Bridge().chat_bots.get(bottype):
Bridge().chat_bots.get(bottype).sessions.clear_session(session_id)
@@ -477,7 +477,7 @@ class Godcmd(Plugin):
return model
def reload(self):
gconf = plugin_config[self.name]
gconf = pconf(self.name)
if gconf:
if gconf.get("password"):
self.password = gconf["password"]
+2
View File
@@ -98,6 +98,8 @@
如果不想创建 `plugins/linkai/config.json` 配置,可以直接通过 `$linkai sum open` 指令开启该功能。
也可以通过私聊(全局 `config.json` 中的 `linkai_app_code`)或者群聊绑定(通过`group_app_map`参数配置)的应用来开启该功能:在LinkAI平台 [应用配置](https://link-ai.tech/console/factory) 里添加并开启**内容总结**插件。
#### 使用
功能开启后,向机器人发送 **文件**、**分享链接卡片**、**图片** 即可生成摘要,进一步可以与文件或链接的内容进行多轮对话。如果需要关闭某种类型的内容总结,设置 `summary`配置中的type字段即可。
+51 -14
View File
@@ -9,7 +9,7 @@ from common.expired_dict import ExpiredDict
from common import const
import os
from .utils import Util
from config import plugin_config
from config import plugin_config, conf
@plugins.register(
@@ -28,7 +28,7 @@ class LinkAI(Plugin):
# 未加载到配置,使用模板中的配置
self.config = self._load_config_template()
if self.config:
self.mj_bot = MJBot(self.config.get("midjourney"))
self.mj_bot = MJBot(self.config.get("midjourney"), self._fetch_group_app_code)
self.sum_config = {}
if self.config:
self.sum_config = self.config.get("summary")
@@ -56,7 +56,8 @@ class LinkAI(Plugin):
return
if context.type != ContextType.IMAGE:
_send_info(e_context, "正在为你加速生成摘要,请稍后")
res = LinkSummary().summary_file(file_path)
app_code = self._fetch_app_code(context)
res = LinkSummary().summary_file(file_path, app_code)
if not res:
if context.type != ContextType.IMAGE:
_set_reply_text("因为神秘力量无法获取内容,请稍后再试吧", e_context, level=ReplyType.TEXT)
@@ -74,7 +75,8 @@ class LinkAI(Plugin):
if not LinkSummary().check_url(context.content):
return
_send_info(e_context, "正在为你加速生成摘要,请稍后")
res = LinkSummary().summary_url(context.content)
app_code = self._fetch_app_code(context)
res = LinkSummary().summary_url(context.content, app_code)
if not res:
_set_reply_text("因为神秘力量无法获取文章内容,请稍后再试吧~", e_context, level=ReplyType.TEXT)
return
@@ -169,7 +171,7 @@ class LinkAI(Plugin):
return
if len(cmd) == 3 and cmd[1] == "sum" and (cmd[2] == "open" or cmd[2] == "close"):
# 知识库开关指令
# 总结对话开关指令
if not Util.is_admin(e_context):
_set_reply_text("需要管理员权限执行", e_context, level=ReplyType.ERROR)
return
@@ -192,14 +194,36 @@ class LinkAI(Plugin):
return
def _is_summary_open(self, context) -> bool:
if not self.sum_config or not self.sum_config.get("enabled"):
return False
if context.kwargs.get("isgroup") and not self.sum_config.get("group_enabled"):
return False
support_type = self.sum_config.get("type") or ["FILE", "SHARING"]
if context.type.name not in support_type and context.type.name != "TEXT":
return False
return True
# 获取远程应用插件状态
remote_enabled = False
if context.kwargs.get("isgroup"):
# 群聊场景只查询群对应的app_code
group_name = context.get("msg").from_user_nickname
app_code = self._fetch_group_app_code(group_name)
if app_code:
if context.type.name in ["FILE", "SHARING"]:
remote_enabled = Util.fetch_app_plugin(app_code, "内容总结")
else:
# 非群聊场景使用全局app_code
app_code = conf().get("linkai_app_code")
if app_code:
if context.type.name in ["FILE", "SHARING"]:
remote_enabled = Util.fetch_app_plugin(app_code, "内容总结")
# 基础条件:总开关开启且消息类型符合要求
base_enabled = (
self.sum_config
and self.sum_config.get("enabled")
and (context.type.name in (
self.sum_config.get("type") or ["FILE", "SHARING"]) or context.type.name == "TEXT")
)
# 群聊:需要满足(总开关和群开关)或远程插件开启
if context.kwargs.get("isgroup"):
return (base_enabled and self.sum_config.get("group_enabled")) or remote_enabled
# 非群聊:只需要满足总开关或远程插件开启
return base_enabled or remote_enabled
# LinkAI 对话任务处理
def _is_chat_task(self, e_context: EventContext):
@@ -230,6 +254,19 @@ class LinkAI(Plugin):
app_code = group_mapping.get(group_name) or group_mapping.get("ALL_GROUP")
return app_code
def _fetch_app_code(self, context) -> str:
"""
根据主配置或者群聊名称获取对应的应用code,优先获取群聊配置的应用code
:param context: 上下文
:return: 应用code
"""
app_code = conf().get("linkai_app_code")
if context.kwargs.get("isgroup"):
# 群聊场景只查询群对应的app_code
group_name = context.get("msg").from_user_nickname
app_code = self._fetch_group_app_code(group_name)
return app_code
def get_help_text(self, verbose=False, **kwargs):
trigger_prefix = _get_trigger_prefix()
help_text = "用于集成 LinkAI 提供的知识库、Midjourney绘画、文档总结、联网搜索等能力。\n\n"
@@ -254,7 +291,7 @@ class LinkAI(Plugin):
plugin_conf = json.load(f)
plugin_conf["midjourney"]["enabled"] = False
plugin_conf["summary"]["enabled"] = False
plugin_config["linkai"] = plugin_conf
write_plugin_config({"linkai": plugin_conf})
return plugin_conf
except Exception as e:
logger.exception(e)
+25 -4
View File
@@ -10,6 +10,7 @@ from bridge.context import ContextType
from plugins import EventContext, EventAction
from .utils import Util
INVALID_REQUEST = 410
NOT_FOUND_ORIGIN_IMAGE = 461
NOT_FOUND_TASK = 462
@@ -67,10 +68,11 @@ class MJTask:
# midjourney bot
class MJBot:
def __init__(self, config):
def __init__(self, config, fetch_group_app_code):
self.base_url = conf().get("linkai_api_base", "https://api.link-ai.tech") + "/v1/img/midjourney"
self.headers = {"Authorization": "Bearer " + conf().get("linkai_api_key")}
self.config = config
self.fetch_group_app_code = fetch_group_app_code
self.tasks = {}
self.temp_dict = {}
self.tasks_lock = threading.Lock()
@@ -98,7 +100,7 @@ class MJBot:
return TaskType.VARIATION
elif cmd_list[0].lower() == f"{trigger_prefix}mjr":
return TaskType.RESET
elif context.type == ContextType.IMAGE_CREATE and self.config.get("use_image_create_prefix") and self.config.get("enabled"):
elif context.type == ContextType.IMAGE_CREATE and self.config.get("use_image_create_prefix") and self._is_mj_open(context):
return TaskType.GENERATE
def process_mj_task(self, mj_type: TaskType, e_context: EventContext):
@@ -129,8 +131,8 @@ class MJBot:
self._set_reply_text(f"Midjourney绘画已{tips_text}", e_context, level=ReplyType.INFO)
return
if not self.config.get("enabled"):
logger.warn("Midjourney绘画未开启,请查看 plugins/linkai/config.json 中的配置")
if not self._is_mj_open(context):
logger.warn("Midjourney绘画未开启,请查看 plugins/linkai/config.json 中的配置,或者在LinkAI平台 应用中添加/打开”MJ“插件")
self._set_reply_text(f"Midjourney绘画未开启", e_context, level=ReplyType.INFO)
return
@@ -409,6 +411,25 @@ class MJBot:
result.append(task)
return result
def _is_mj_open(self, context) -> bool:
# 获取远程应用插件状态
remote_enabled = False
if context.kwargs.get("isgroup"):
# 群聊场景只查询群对应的app_code
group_name = context.get("msg").from_user_nickname
app_code = self.fetch_group_app_code(group_name)
if app_code:
remote_enabled = Util.fetch_app_plugin(app_code, "Midjourney")
else:
# 非群聊场景使用全局app_code
app_code = conf().get("linkai_app_code")
if app_code:
remote_enabled = Util.fetch_app_plugin(app_code, "Midjourney")
# 本地配置
base_enabled = self.config.get("enabled")
return base_enabled or remote_enabled
def _send(channel, reply: Reply, context, retry_cnt=0):
try:
+12 -6
View File
@@ -9,20 +9,26 @@ class LinkSummary:
def __init__(self):
pass
def summary_file(self, file_path: str):
def summary_file(self, file_path: str, app_code: str):
file_body = {
"file": open(file_path, "rb"),
"name": file_path.split("/")[-1],
"name": file_path.split("/")[-1]
}
body = {
"app_code": app_code
}
url = self.base_url() + "/v1/summary/file"
res = requests.post(url, headers=self.headers(), files=file_body, timeout=(5, 300))
logger.info(f"[LinkSum] file summary, app_code={app_code}")
res = requests.post(url, headers=self.headers(), files=file_body, data=body, timeout=(5, 300))
return self._parse_summary_res(res)
def summary_url(self, url: str):
def summary_url(self, url: str, app_code: str):
url = html.unescape(url)
body = {
"url": url
"url": url,
"app_code": app_code
}
logger.info(f"[LinkSum] url summary, app_code={app_code}")
res = requests.post(url=self.base_url() + "/v1/summary/url", headers=self.headers(), json=body, timeout=(5, 180))
return self._parse_summary_res(res)
@@ -48,7 +54,7 @@ class LinkSummary:
def _parse_summary_res(self, res):
if res.status_code == 200:
res = res.json()
logger.debug(f"[LinkSum] url summary, res={res}")
logger.debug(f"[LinkSum] summary result, res={res}")
if res.get("code") == 200:
data = res.get("data")
return {
+23 -1
View File
@@ -1,7 +1,9 @@
import requests
from common.log import logger
from config import global_config
from bridge.reply import Reply, ReplyType
from plugins.event import EventContext, EventAction
from config import conf
class Util:
@staticmethod
@@ -26,3 +28,23 @@ class Util:
reply = Reply(level, content)
e_context["reply"] = reply
e_context.action = EventAction.BREAK_PASS
@staticmethod
def fetch_app_plugin(app_code: str, plugin_name: str) -> bool:
try:
headers = {"Authorization": "Bearer " + conf().get("linkai_api_key")}
# do http request
base_url = conf().get("linkai_api_base", "https://api.link-ai.tech")
params = {"app_code": app_code}
res = requests.get(url=base_url + "/v1/app/info", params=params, headers=headers, timeout=(5, 10))
if res.status_code == 200:
plugins = res.json().get("data").get("plugins")
for plugin in plugins:
if plugin.get("name") and plugin.get("name") == plugin_name:
return True
return False
else:
logger.warning(f"[LinkAI] find app info exception, res={res}")
return False
except Exception as e:
return False
+3 -3
View File
@@ -1,6 +1,6 @@
import os
import json
from config import pconf, plugin_config, conf
from config import pconf, plugin_config, conf, write_plugin_config
from common.log import logger
@@ -24,13 +24,13 @@ class Plugin:
plugin_conf = json.load(f)
# 写入全局配置内存
plugin_config[self.name] = plugin_conf
write_plugin_config({self.name: plugin_conf})
logger.debug(f"loading plugin config, plugin_name={self.name}, conf={plugin_conf}")
return plugin_conf
def save_config(self, config: dict):
try:
plugin_config[self.name] = config
write_plugin_config({self.name: config})
# 写入全局配置
global_config_path = "./plugins/config.json"
if os.path.exists(global_config_path):
+6 -1
View File
@@ -9,7 +9,7 @@ import sys
from common.log import logger
from common.singleton import singleton
from common.sorted_dict import SortedDict
from config import conf, write_plugin_config
from config import conf, remove_plugin_config, write_plugin_config
from .event import *
@@ -151,6 +151,8 @@ class PluginManager:
self.disable_plugin(name)
failed_plugins.append(name)
continue
if name in self.instances:
self.instances[name].handlers.clear()
self.instances[name] = instance
for event in instance.handlers:
if event not in self.listening_plugins:
@@ -161,10 +163,13 @@ class PluginManager:
def reload_plugin(self, name: str):
name = name.upper()
remove_plugin_config(name)
if name in self.instances:
for event in self.listening_plugins:
if name in self.listening_plugins[event]:
self.listening_plugins[event].remove(name)
if name in self.instances:
self.instances[name].handlers.clear()
del self.instances[name]
self.activate_plugins()
return True
+1
View File
@@ -180,6 +180,7 @@ class Role(Plugin):
e_context["reply"] = reply
e_context.action = EventAction.BREAK_PASS
else:
e_context["context"]["generate_breaked_by"] = EventAction.BREAK
prompt = self.roleplays[sessionid].action(content)
e_context["context"].type = ContextType.TEXT
e_context["context"].content = prompt
-4
View File
@@ -12,10 +12,6 @@
"url": "https://github.com/lanvent/plugin_summary.git",
"desc": "总结聊天记录的插件"
},
"timetask": {
"url": "https://github.com/haikerapples/timetask.git",
"desc": "一款定时任务系统的插件"
},
"Apilot": {
"url": "https://github.com/6vision/Apilot.git",
"desc": "通过api直接查询早报、热榜、快递、天气等实用信息的插件"
+2 -2
View File
@@ -1,7 +1,7 @@
openai==0.27.8
HTMLParser>=0.0.2
PyQRCode>=1.2.1
qrcode>=7.4.2
PyQRCode==1.2.1
qrcode==7.4.2
requests>=2.28.2
chardet>=5.1.0
Pillow
+192
View File
@@ -0,0 +1,192 @@
#!/usr/bin/env bash
# Management script for ChatGPT-on-WeChat: start/stop/restart/update.
set -e

# ANSI color definitions for console output
RED='\033[0;31m'     # red
GREEN='\033[0;32m'   # green
YELLOW='\033[0;33m'  # yellow
BLUE='\033[0;34m'    # blue
NC='\033[0m'         # no color

# Directory containing this script; all paths resolve relative to it.
export BASE_DIR=$(cd "$(dirname "$0")"; pwd)
echo -e "${GREEN}📁 BASE_DIR: ${BASE_DIR}${NC}"
# Abort unless config.json exists beside this script.
check_config_file() {
  [ -f "${BASE_DIR}/config.json" ] && return 0
  echo -e "${RED}❌ 错误:未找到 config.json 文件。请确保 config.json 存在于当前目录。${NC}"
  exit 1
}
# Ensure python3 >= 3.7 and a working pip are available; abort otherwise.
check_python_version() {
  if ! command -v python3 &> /dev/null; then
    echo -e "${RED}❌ 错误:未找到 Python3。请安装 Python 3.7 或以上版本。${NC}"
    exit 1
  fi
  PYTHON_VERSION=$(python3 -c 'import sys; print(f"{sys.version_info.major}.{sys.version_info.minor}")')
  # Split "major.minor" with parameter expansion instead of spawning cut.
  PYTHON_MAJOR=${PYTHON_VERSION%%.*}
  PYTHON_MINOR=${PYTHON_VERSION#*.}
  if (( PYTHON_MAJOR < 3 || (PYTHON_MAJOR == 3 && PYTHON_MINOR < 7) )); then
    echo -e "${RED}❌ 错误:Python 版本为 ${PYTHON_VERSION}。请安装 Python 3.7 或以上版本。${NC}"
    exit 1
  fi
  if ! python3 -m pip --version &> /dev/null; then
    echo -e "${RED}❌ 错误:未找到 pip。请安装 pip。${NC}"
    exit 1
  fi
}
# Install required (and optional, when present) Python dependencies.
install_dependencies() {
  echo -e "${YELLOW}⏳ 正在安装依赖...${NC}"
  if [ ! -f "${BASE_DIR}/requirements.txt" ]; then
    echo -e "${RED}❌ 错误:未找到 requirements.txt 文件。${NC}"
    exit 1
  fi
  # CONSISTENCY FIX: use `python3 -m pip` — the interpreter that
  # check_python_version validated. A bare `pip3` may be missing or bound
  # to a different Python installation.
  # The Tsinghua PyPI mirror speeds up installs in mainland China.
  python3 -m pip install -r "${BASE_DIR}/requirements.txt" -i https://pypi.tuna.tsinghua.edu.cn/simple
  # Optional extras (requirements-optional.txt), when the file exists.
  if [ -f "${BASE_DIR}/requirements-optional.txt" ]; then
    echo -e "${YELLOW}⏳ 正在安装可选的依赖...${NC}"
    python3 -m pip install -r "${BASE_DIR}/requirements-optional.txt" -i https://pypi.tuna.tsinghua.edu.cn/simple
  fi
}
# Launch app.py in the background and stream its log so the user can scan
# the login QR code.
run_project() {
  echo -e "${GREEN}🚀 准备启动项目...${NC}"
  cd "${BASE_DIR}"
  sleep 2
  # Dispatch on OS: Linux detaches via setsid; macOS lacks setsid, so the
  # process is simply backgrounded there.
  OS_TYPE=$(uname)
  case "$OS_TYPE" in
    Linux)
      setsid python3 "${BASE_DIR}/app.py" > "${BASE_DIR}/nohup.out" 2>&1 &
      echo -e "${GREEN}🚀 正在启动 ChatGPT-on-WeChat (Linux)...${NC}"
      ;;
    Darwin)
      python3 "${BASE_DIR}/app.py" > "${BASE_DIR}/nohup.out" 2>&1 &
      echo -e "${GREEN}🚀 正在启动 ChatGPT-on-WeChat (macOS)...${NC}"
      ;;
    *)
      echo -e "${RED}❌ 错误:不支持的操作系统 ${OS_TYPE}${NC}"
      exit 1
      ;;
  esac
  sleep 2
  # Follow the log output (blocks; user scans the QR code from here).
  tail -n 30 -f "${BASE_DIR}/nohup.out"
}
# Update the project from git (GitHub, falling back to Gitee), reinstall
# dependencies, and restart.
update_project() {
  echo -e "${GREEN}🔄 准备更新项目,现在停止项目...${NC}"
  cd "${BASE_DIR}"
  # Stop the running instance first.
  stop_project
  echo -e "${GREEN}🔄 开始更新项目...${NC}"
  # Pull the latest code from the git repository.
  if [ -d .git ]; then
    # BUG FIX: test the pull inside `if` itself. The original ran
    # GIT_PULL_OUTPUT=$(git pull) as a standalone statement, so under
    # `set -e` a failing pull aborted the whole script and the Gitee
    # fallback below was unreachable dead code.
    if GIT_PULL_OUTPUT=$(git pull); then
      if [[ "$GIT_PULL_OUTPUT" == *"Already up to date."* ]]; then
        echo -e "${GREEN}✅ 代码已经是最新的。${NC}"
      else
        echo -e "${GREEN}✅ 代码更新完成。${NC}"
      fi
    else
      echo -e "${YELLOW}⚠️ 从 GitHub 更新失败,尝试切换到 Gitee 仓库...${NC}"
      # Switch the remote to the Gitee mirror and retry.
      git remote set-url origin https://gitee.com/zhayujie/chatgpt-on-wechat.git
      if GIT_PULL_OUTPUT=$(git pull); then
        if [[ "$GIT_PULL_OUTPUT" == *"Already up to date."* ]]; then
          echo -e "${GREEN}✅ 代码已经是最新的。${NC}"
        else
          echo -e "${GREEN}✅ 从 Gitee 更新成功。${NC}"
        fi
      else
        echo -e "${RED}❌ 错误:从 Gitee 更新仍然失败,请检查网络连接。${NC}"
        exit 1
      fi
    fi
  else
    echo -e "${RED}❌ 错误:当前目录不是 git 仓库,无法更新代码。${NC}"
    exit 1
  fi
  # Reinstall dependencies and relaunch.
  install_dependencies
  run_project
}
# Stop the running app.py instance(s) started from BASE_DIR.
stop_project() {
  echo -e "${GREEN}🛑 正在停止项目...${NC}"
  cd "${BASE_DIR}"
  # Find python3 processes running this directory's app.py.
  pid=$(ps ax | grep -i app.py | grep "${BASE_DIR}" | grep python3 | grep -v grep | awk '{print $1}')
  if [ -z "$pid" ] ; then
    echo -e "${YELLOW}⚠️ 未找到正在运行的 ChatGPT-on-WeChat。${NC}"
    return
  fi
  echo -e "${GREEN}🛑 正在运行的 ChatGPT-on-WeChat (PID: ${pid})${NC}"
  # BUG FIX: $pid may contain several PIDs; the original's `ps -p $pid`
  # broke on a multi-line value. Send TERM to each, then KILL survivors.
  for p in $pid; do
    kill "$p" 2>/dev/null || true
  done
  sleep 3
  for p in $pid; do
    if ps -p "$p" > /dev/null; then
      echo -e "${YELLOW}⚠️ 进程未停止,尝试强制终止...${NC}"
      kill -9 "$p" 2>/dev/null || true
    fi
  done
  echo -e "${GREEN}✅ 已停止 ChatGPT-on-WeChat (PID: ${pid})${NC}"
}
# Entry point: dispatch on the first command-line argument.
case "$1" in
  start)
    # Verify prerequisites, then launch.
    check_config_file
    check_python_version
    run_project
    ;;
  stop)
    stop_project
    ;;
  restart)
    # Stop (if running), re-verify prerequisites, then launch again.
    stop_project
    check_config_file
    check_python_version
    run_project
    ;;
  update)
    # Pull latest code, reinstall dependencies, restart.
    check_config_file
    check_python_version
    update_project
    ;;
  *)
    # Unknown or missing argument: print usage and exit non-zero.
    echo -e "${YELLOW}=========================================${NC}"
    echo -e "${YELLOW}用法:${GREEN}$0 ${BLUE}{start|stop|restart|update}${NC}"
    echo -e "${YELLOW}示例:${NC}"
    echo -e " ${GREEN}$0 ${BLUE}start${NC}"
    echo -e " ${GREEN}$0 ${BLUE}stop${NC}"
    echo -e " ${GREEN}$0 ${BLUE}restart${NC}"
    echo -e " ${GREEN}$0 ${BLUE}update${NC}"
    echo -e "${YELLOW}=========================================${NC}"
    exit 1
    ;;
esac