Compare commits

..

49 Commits

Author SHA1 Message Date
zhayujie 883f0d449b Merge pull request #2317 from 6vision/master
feat: add install.sh and run.sh
2024-09-26 16:43:56 +08:00
6vision f4c62e7844 update install.sh url 2024-09-26 16:43:12 +08:00
6vision f0d212a9d2 Merge branch 'master' of github.com:6vision/chatgpt-on-wechat 2024-09-26 16:02:19 +08:00
6vision 76a8974034 update run.sh 2024-09-26 16:01:44 +08:00
vision 0614e822f4 Merge branch 'zhayujie:master' into master 2024-09-26 13:07:45 +08:00
vision 6f682c9a2e Merge pull request #2311 from cmgzn/master
fix: gemini doesn't receive system messages...
2024-09-26 13:04:47 +08:00
6vision a9fdbc31c5 update date 2024-09-26 13:02:38 +08:00
cmgzn 086fdb5856 fix gemini logger 2024-09-26 02:49:52 +01:00
6vision 63c8ef4f17 feat: install.sh and run.sh 2024-09-26 00:34:52 +08:00
zhayujie 736f6523c7 Merge branch 'master' into master 2024-09-25 23:11:13 +08:00
vision 8b0b360d25 Merge pull request #2288 from KuroIVeko/patch-3
Support more models from Zhipu AI
2024-09-25 22:28:16 +08:00
vision 80b84e2ee6 Merge pull request #2277 from KuroIVeko/patch-1
Lower Gemini's safety thresholds
2024-09-25 22:24:20 +08:00
vision b5b7d86f7b Merge pull request #2278 from 6vision/moonshoot
fix: "model":"mooshoot", which defaults to "moonshot-v1-32k".
2024-09-25 22:10:40 +08:00
cmgzn f20d704390 fix: gemini doesn't receive system messages; change session to gpt method, add system messages as user messages to the gemini, and logging historical messages 2024-09-20 09:10:21 +01:00
vision e4e1e2e944 Merge pull request #2306 from 6vision/master
fix: Linkai voice configuration
2024-09-18 19:43:41 +08:00
vision 6bc7eeb4cc Merge branch 'zhayujie:master' into master 2024-09-18 19:41:23 +08:00
6vision 656ed5de7b fix: LinkAI voice configuration 2024-09-18 19:40:51 +08:00
zhayujie a11d695c78 Merge pull request #2300 from 6vision/master
feat: support o1-preview and o1-mini model
2024-09-13 10:50:04 +08:00
6vision c4f9acd5c5 update 2024-09-13 10:48:51 +08:00
6vision 5ef929dc42 o1 model support #model 2024-09-13 10:21:38 +08:00
6vision c8cf27b544 feat: support o1-preview and o1-mini model 2024-09-13 10:13:23 +08:00
vision bb5ecfc398 Merge pull request #2298 from 6vision/error_print_ascii_windows
Handle ASCII QR code print error on Windows
2024-09-11 22:35:30 +08:00
6vision c91e7c35bb Remove unused imports 2024-09-11 22:34:33 +08:00
6vision 532d56df2d Handle ASCII QR code print error on Windows 2024-09-11 22:30:25 +08:00
KurolVeko 111ad44029 Update const.py 2024-09-05 11:07:06 +08:00
KurolVeko 6b02bae957 Update bridge.py 2024-09-05 10:59:57 +08:00
vision 6831743416 Merge pull request #2286 from 6vision/gpt
feat: support gpt-4o-2024-08-06 model
2024-09-04 18:44:08 +08:00
6vision 63e2f42636 feat: support gpt-4o-2024-08-06 model 2024-09-04 18:39:29 +08:00
6vision f6e6805453 fix: "model":"mooshoot", which defaults to "moonshot-v1-32k". 2024-08-31 16:09:10 +08:00
KurolVeko ad77ad8f2b Lower Gemini's safety thresholds
Gemini's default safety thresholds are set too high, resulting in frequent censorship of generated text. I have lowered the thresholds for all four safety categories according to Google's documentation.
2024-08-30 17:00:51 +08:00
Saboteur7 469524e8ae Merge pull request #2206 from VanJohnPK/master
fix azure voice error 修复Azure语音服务报错问题
2024-08-29 11:33:49 +08:00
Saboteur7 f4f55d5dfd Merge pull request #2247 from byang822/abacusoft-alex
wenxin character model supports prompt
2024-08-29 11:31:45 +08:00
Saboteur7 c248d0f3f4 Merge pull request #2262 from 6vision/cancel_wecom_subscribe
Cancel subscribe_msg of wechatcomapp channel
2024-08-29 11:31:04 +08:00
Saboteur7 648a04b513 Merge pull request #2265 from 6vision/feat0825
Support configuration whether to be @ in group chat.
2024-08-29 11:30:46 +08:00
vision bdc86c16ec Merge pull request #2268 from 6vision/xunfei_system_prompt
Xunfei supports system prompt(character_desc).
2024-08-27 20:46:07 +08:00
6vision 21efd17c17 Xunfei supports system prompt(character_desc). 2024-08-25 22:22:29 +08:00
Saboteur7 aaa75e7b62 Merge pull request #2267 from 6vision/master
Optimize the welcome message for new members.
2024-08-25 17:16:11 +08:00
6vision 6d0cef3152 Optimize the welcome message for new members. 2024-08-25 17:10:44 +08:00
Saboteur7 c18472289f Merge pull request #2207 from Abyss-Seeker/master
支持更多语言(英语)的微信客户端
2024-08-25 16:10:33 +08:00
6vision 02b7c70a81 Support configuration whether to be @ in group chat. 2024-08-25 15:13:25 +08:00
6vision 4eaa2b93c6 Cancel subscribe_msg of wechatcomapp channel 2024-08-22 22:03:04 +08:00
darkVinci d347905373 Merge pull request #1 from zhayujie/master
merge 15 commits
2024-08-21 11:21:31 +08:00
vision f495213b2c Merge pull request #2237 from 6vision/fix_role
Optimize log information printing
2024-08-17 17:01:08 +08:00
Alex Yang 9b125913ae wenxin character model supports prompt 2024-08-16 14:58:17 +08:00
6vision da81f05804 Optimize log information printing 2024-08-14 23:03:57 +08:00
Abyss-Seeker 9a371a4d4d Update wechat_message.py
加入更多英文适配(通过QR code加入群聊)
2024-08-06 23:30:32 +08:00
Abyss-Seeker 1e92828f1a 支持更多语言(英语)
加入了notes_join_group,notes_exit_group,notes_patpat列表,可以在加入群聊,退出群聊和拍一拍消息中匹配更多的字符。在此完成了英语(invited, removed, tickled)的匹配,使如果微信语言是英文的话也可以正常识别啦!同时,以后也可以通过加list和判断语句的方式支持更多语言!
2024-08-04 10:14:23 +08:00
Saboteur7 7e724b3fa3 Update README.md 2024-08-02 16:06:25 +08:00
Zheng 3effd5afd1 fix azure voice error 2024-07-30 17:10:02 +08:00
17 changed files with 344 additions and 50 deletions
+10 -1
View File
@@ -46,6 +46,10 @@ DEMO视频:https://cdn.link-ai.tech/doc/cow_demo.mp4
# 🏷 更新日志
>**2024.09.26** [1.7.2版本](https://github.com/zhayujie/chatgpt-on-wechat/releases/tag/1.7.2) 和 [1.7.1版本](https://github.com/zhayujie/chatgpt-on-wechat/releases/tag/1.7.1) 文心,讯飞等模型优化、o1 模型、快速安装和管理脚本
>**2024.08.02** [1.7.0版本](https://github.com/zhayujie/chatgpt-on-wechat/releases/tag/1.7.0) 新增 讯飞4.0 模型、知识库引用来源展示、相关插件优化
>**2024.07.19** [1.6.9版本](https://github.com/zhayujie/chatgpt-on-wechat/releases/tag/1.6.9) 新增 gpt-4o-mini 模型、阿里语音识别、企微应用渠道路由优化
>**2024.07.05** [1.6.8版本](https://github.com/zhayujie/chatgpt-on-wechat/releases/tag/1.6.8) 和 [1.6.7版本](https://github.com/zhayujie/chatgpt-on-wechat/releases/tag/1.6.7)Claude3.5, Gemini 1.5 Pro, MiniMax模型、工作流图片输入、模型列表完善
@@ -78,8 +82,13 @@ DEMO视频:https://cdn.link-ai.tech/doc/cow_demo.mp4
# 🚀 快速开始
快速开始详细文档:[项目搭建文档](https://docs.link-ai.tech/cow/quick-start)
- 快速开始详细文档:[项目搭建文档](https://docs.link-ai.tech/cow/quick-start)
- 快速安装脚本,详细使用指导:[一键安装启动脚本](https://github.com/zhayujie/chatgpt-on-wechat/wiki/%E4%B8%80%E9%94%AE%E5%AE%89%E8%A3%85%E5%90%AF%E5%8A%A8%E8%84%9A%E6%9C%AC)
```bash
bash <(curl -sS https://cdn.link-ai.tech/code/cow/install.sh)
```
- 项目管理脚本,详细使用指导:[项目管理脚本](https://github.com/zhayujie/chatgpt-on-wechat/wiki/%E9%A1%B9%E7%9B%AE%E7%AE%A1%E7%90%86%E8%84%9A%E6%9C%AC)
## 一、准备
### 1. 账号注册
+6 -1
View File
@@ -19,6 +19,11 @@ class BaiduWenxinBot(Bot):
def __init__(self):
super().__init__()
wenxin_model = conf().get("baidu_wenxin_model")
self.prompt_enabled = conf().get("baidu_wenxin_prompt_enabled")
if self.prompt_enabled:
self.prompt = conf().get("character_desc", "")
if self.prompt == "":
logger.warn("[BAIDU] Although you enabled model prompt, character_desc is not specified.")
if wenxin_model is not None:
wenxin_model = conf().get("baidu_wenxin_model") or "eb-instant"
else:
@@ -84,7 +89,7 @@ class BaiduWenxinBot(Bot):
headers = {
'Content-Type': 'application/json'
}
payload = {'messages': session.messages}
payload = {'messages': session.messages, 'system': self.prompt} if self.prompt_enabled else {'messages': session.messages}
response = requests.request("POST", url, headers=headers, data=json.dumps(payload))
response_text = json.loads(response.text)
logger.info(f"[BAIDU] response text={response_text}")
+12 -4
View File
@@ -5,7 +5,7 @@ import time
import openai
import openai.error
import requests
from common import const
from bot.bot import Bot
from bot.chatgpt.chat_gpt_session import ChatGPTSession
from bot.openai.open_ai_image import OpenAIImage
@@ -15,7 +15,7 @@ from bridge.reply import Reply, ReplyType
from common.log import logger
from common.token_bucket import TokenBucket
from config import conf, load_config
from bot.baidu.baidu_wenxin_session import BaiduWenxinSession
# OpenAI对话模型API (可用)
class ChatGPTBot(Bot, OpenAIImage):
@@ -30,10 +30,12 @@ class ChatGPTBot(Bot, OpenAIImage):
openai.proxy = proxy
if conf().get("rate_limit_chatgpt"):
self.tb4chatgpt = TokenBucket(conf().get("rate_limit_chatgpt", 20))
conf_model = conf().get("model") or "gpt-3.5-turbo"
self.sessions = SessionManager(ChatGPTSession, model=conf().get("model") or "gpt-3.5-turbo")
# o1相关模型不支持system prompt,暂时用文心模型的session
self.args = {
"model": conf().get("model") or "gpt-3.5-turbo", # 对话模型的名称
"model": conf_model, # 对话模型的名称
"temperature": conf().get("temperature", 0.9), # 值在[0,1]之间,越大表示回复越具有不确定性
# "max_tokens":4096, # 回复最大的字符数
"top_p": conf().get("top_p", 1),
@@ -42,6 +44,12 @@ class ChatGPTBot(Bot, OpenAIImage):
"request_timeout": conf().get("request_timeout", None), # 请求超时时间,openai接口默认设置为600,对于难问题一般需要较长时间
"timeout": conf().get("request_timeout", None), # 重试超时时间,在这个时间内,将会自动重试
}
# o1相关模型固定了部分参数,暂时去掉
if conf_model in [const.O1, const.O1_MINI]:
self.sessions = SessionManager(BaiduWenxinSession, model=conf().get("model") or const.O1_MINI)
remove_keys = ["temperature", "top_p", "frequency_penalty", "presence_penalty"]
for key in remove_keys:
self.args.pop(key, None) # 如果键不存在,使用 None 来避免抛出错误
def reply(self, query, context=None):
# acquire reply content
+1 -1
View File
@@ -67,7 +67,7 @@ def num_tokens_from_messages(messages, model):
elif model in ["gpt-4-0314", "gpt-4-0613", "gpt-4-32k", "gpt-4-32k-0613", "gpt-3.5-turbo-0613",
"gpt-3.5-turbo-16k", "gpt-3.5-turbo-16k-0613", "gpt-35-turbo-16k", "gpt-4-turbo-preview",
"gpt-4-1106-preview", const.GPT4_TURBO_PREVIEW, const.GPT4_VISION_PREVIEW, const.GPT4_TURBO_01_25,
const.GPT_4o, const.GPT_4o_MINI, const.LINKAI_4o, const.LINKAI_4_TURBO]:
const.GPT_4o, const.GPT_4O_0806, const.GPT_4o_MINI, const.LINKAI_4o, const.LINKAI_4_TURBO]:
return num_tokens_from_messages(messages, model="gpt-4")
elif model.startswith("claude-3"):
return num_tokens_from_messages(messages, model="gpt-3.5-turbo")
+46 -12
View File
@@ -13,7 +13,9 @@ from bridge.context import ContextType, Context
from bridge.reply import Reply, ReplyType
from common.log import logger
from config import conf
from bot.chatgpt.chat_gpt_session import ChatGPTSession
from bot.baidu.baidu_wenxin_session import BaiduWenxinSession
from google.generativeai.types import HarmCategory, HarmBlockThreshold
# OpenAI对话模型API (可用)
@@ -22,8 +24,8 @@ class GoogleGeminiBot(Bot):
def __init__(self):
super().__init__()
self.api_key = conf().get("gemini_api_key")
# 复用文心的token计算方式
self.sessions = SessionManager(BaiduWenxinSession, model=conf().get("model") or "gpt-3.5-turbo")
# 复用chatGPT的token计算方式
self.sessions = SessionManager(ChatGPTSession, model=conf().get("model") or "gpt-3.5-turbo")
self.model = conf().get("model") or "gemini-pro"
if self.model == "gemini":
self.model = "gemini-pro"
@@ -36,18 +38,44 @@ class GoogleGeminiBot(Bot):
session_id = context["session_id"]
session = self.sessions.session_query(query, session_id)
gemini_messages = self._convert_to_gemini_messages(self.filter_messages(session.messages))
logger.debug(f"[Gemini] messages={gemini_messages}")
genai.configure(api_key=self.api_key)
model = genai.GenerativeModel(self.model)
response = model.generate_content(gemini_messages)
reply_text = response.text
self.sessions.session_reply(reply_text, session_id)
logger.info(f"[Gemini] reply={reply_text}")
return Reply(ReplyType.TEXT, reply_text)
# 添加安全设置
safety_settings = {
HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_NONE,
HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_NONE,
HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: HarmBlockThreshold.BLOCK_NONE,
HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_NONE,
}
# 生成回复,包含安全设置
response = model.generate_content(
gemini_messages,
safety_settings=safety_settings
)
if response.candidates and response.candidates[0].content:
reply_text = response.candidates[0].content.parts[0].text
logger.info(f"[Gemini] reply={reply_text}")
self.sessions.session_reply(reply_text, session_id)
return Reply(ReplyType.TEXT, reply_text)
else:
# 没有有效响应内容,可能内容被屏蔽,输出安全评分
logger.warning("[Gemini] No valid response generated. Checking safety ratings.")
if hasattr(response, 'candidates') and response.candidates:
for rating in response.candidates[0].safety_ratings:
logger.warning(f"Safety rating: {rating.category} - {rating.probability}")
error_message = "No valid response generated due to safety constraints."
self.sessions.session_reply(error_message, session_id)
return Reply(ReplyType.ERROR, error_message)
except Exception as e:
logger.error("[Gemini] fetch reply error, may contain unsafe content")
logger.error(e)
return Reply(ReplyType.ERROR, "invoke [Gemini] api failed!")
logger.error(f"[Gemini] Error generating response: {str(e)}", exc_info=True)
error_message = "Failed to invoke [Gemini] api!"
self.sessions.session_reply(error_message, session_id)
return Reply(ReplyType.ERROR, error_message)
def _convert_to_gemini_messages(self, messages: list):
res = []
for msg in messages:
@@ -55,6 +83,8 @@ class GoogleGeminiBot(Bot):
role = "user"
elif msg.get("role") == "assistant":
role = "model"
elif msg.get("role") == "system":
role = "user"
else:
continue
res.append({
@@ -71,7 +101,11 @@ class GoogleGeminiBot(Bot):
return res
for i in range(len(messages) - 1, -1, -1):
message = messages[i]
if message.get("role") != turn:
role = message.get("role")
if role == "system":
res.insert(0, message)
continue
if role != turn:
continue
res.insert(0, message)
if turn == "user":
+4 -1
View File
@@ -19,8 +19,11 @@ class MoonshotBot(Bot):
def __init__(self):
super().__init__()
self.sessions = SessionManager(MoonshotSession, model=conf().get("model") or "moonshot-v1-128k")
model = conf().get("model") or "moonshot-v1-128k"
if model == "moonshot":
model = "moonshot-v1-32k"
self.args = {
"model": conf().get("model") or "moonshot-v1-128k", # 对话模型的名称
"model": model, # 对话模型的名称
"temperature": conf().get("temperature", 0.3), # 如果设置,值域须为 [0, 1] 我们推荐 0.3,以达到较合适的效果。
"top_p": conf().get("top_p", 1.0), # 使用默认值
}
+2 -2
View File
@@ -3,7 +3,7 @@
import requests, json
from bot.bot import Bot
from bot.session_manager import SessionManager
from bot.baidu.baidu_wenxin_session import BaiduWenxinSession
from bot.chatgpt.chat_gpt_session import ChatGPTSession
from bridge.context import ContextType, Context
from bridge.reply import Reply, ReplyType
from common.log import logger
@@ -53,7 +53,7 @@ class XunFeiBot(Bot):
self.host = urlparse(self.spark_url).netloc
self.path = urlparse(self.spark_url).path
# 和wenxin使用相同的session机制
self.sessions = SessionManager(BaiduWenxinSession, model=const.XUNFEI)
self.sessions = SessionManager(ChatGPTSession, model=const.XUNFEI)
def reply(self, query, context: Context = None) -> Reply:
if context.type == ContextType.TEXT:
+2 -2
View File
@@ -38,7 +38,7 @@ class Bridge(object):
self.btype["chat"] = const.QWEN_DASHSCOPE
if model_type and model_type.startswith("gemini"):
self.btype["chat"] = const.GEMINI
if model_type in [const.ZHIPU_AI]:
if model_type and model_type.startswith("glm"):
self.btype["chat"] = const.ZHIPU_AI
if model_type and model_type.startswith("claude-3"):
self.btype["chat"] = const.CLAUDEAPI
@@ -46,7 +46,7 @@ class Bridge(object):
if model_type in ["claude"]:
self.btype["chat"] = const.CLAUDEAI
if model_type in ["moonshot-v1-8k", "moonshot-v1-32k", "moonshot-v1-128k"]:
if model_type in [const.MOONSHOT, "moonshot-v1-8k", "moonshot-v1-32k", "moonshot-v1-128k"]:
self.btype["chat"] = const.MOONSHOT
if model_type in ["abab6.5-chat"]:
+5 -2
View File
@@ -100,7 +100,10 @@ def qrCallback(uuid, status, qrcode):
qr = qrcode.QRCode(border=1)
qr.add_data(url)
qr.make(fit=True)
qr.print_ascii(invert=True)
try:
qr.print_ascii(invert=True)
except UnicodeEncodeError:
print("ASCII QR code printing failed due to encoding issues.")
@singleton
@@ -202,7 +205,7 @@ class WechatChannel(ChatChannel):
logger.debug(f"[WX]receive attachment msg, file_name={cmsg.content}")
else:
logger.debug("[WX]receive group msg: {}".format(cmsg.content))
context = self._compose_context(cmsg.ctype, cmsg.content, isgroup=True, msg=cmsg)
context = self._compose_context(cmsg.ctype, cmsg.content, isgroup=True, msg=cmsg, no_need_at=conf().get("no_need_at", False))
if context:
self.produce(context)
+31 -14
View File
@@ -14,6 +14,11 @@ class WechatMessage(ChatMessage):
self.create_time = itchat_msg["CreateTime"]
self.is_group = is_group
notes_join_group = ["加入群聊", "加入了群聊", "invited", "joined"] # 可通过添加对应语言的加入群聊通知中的关键词适配更多
notes_bot_join_group = ["邀请你", "invited you", "You've joined", "你通过扫描"]
notes_exit_group = ["移出了群聊", "removed"] # 可通过添加对应语言的踢出群聊通知中的关键词适配更多
notes_patpat = ["拍了拍我", "tickled my", "tickled me"] # 可通过添加对应语言的拍一拍通知中的关键词适配更多
if itchat_msg["Type"] == TEXT:
self.ctype = ContextType.TEXT
self.content = itchat_msg["Text"]
@@ -26,30 +31,42 @@ class WechatMessage(ChatMessage):
self.content = TmpDir().path() + itchat_msg["FileName"] # content直接存临时目录路径
self._prepare_fn = lambda: itchat_msg.download(self.content)
elif itchat_msg["Type"] == NOTE and itchat_msg["MsgType"] == 10000:
if is_group and ("加入群聊" in itchat_msg["Content"] or "加入了群聊" in itchat_msg["Content"]):
if is_group:
if any(note_bot_join_group in itchat_msg["Content"] for note_bot_join_group in notes_bot_join_group): # 邀请机器人加入群聊
logger.warn("机器人加入群聊消息,不处理~")
pass
elif any(note_join_group in itchat_msg["Content"] for note_join_group in notes_join_group): # 若有任何在notes_join_group列表中的字符串出现在NOTE中
# 这里只能得到nickname actual_user_id还是机器人的id
if "加入群聊" in itchat_msg["Content"]:
self.ctype = ContextType.JOIN_GROUP
self.content = itchat_msg["Content"]
self.actual_user_nickname = re.findall(r"\"(.*?)\"", itchat_msg["Content"])[-1]
elif "加入群聊" in itchat_msg["Content"]:
self.ctype = ContextType.JOIN_GROUP
if "加入群聊" not in itchat_msg["Content"]:
self.ctype = ContextType.JOIN_GROUP
self.content = itchat_msg["Content"]
if "invited" in itchat_msg["Content"]: # 匹配英文信息
self.actual_user_nickname = re.findall(r'invited\s+(.+?)\s+to\s+the\s+group\s+chat', itchat_msg["Content"])[0]
elif "joined" in itchat_msg["Content"]: # 匹配通过二维码加入的英文信息
self.actual_user_nickname = re.findall(r'"(.*?)" joined the group chat via the QR Code shared by', itchat_msg["Content"])[0]
elif "加入了群聊" in itchat_msg["Content"]:
self.actual_user_nickname = re.findall(r"\"(.*?)\"", itchat_msg["Content"])[-1]
elif "加入群聊" in itchat_msg["Content"]:
self.ctype = ContextType.JOIN_GROUP
self.content = itchat_msg["Content"]
self.actual_user_nickname = re.findall(r"\"(.*?)\"", itchat_msg["Content"])[0]
elif any(note_exit_group in itchat_msg["Content"] for note_exit_group in notes_exit_group): # 若有任何在notes_exit_group列表中的字符串出现在NOTE中
self.ctype = ContextType.EXIT_GROUP
self.content = itchat_msg["Content"]
self.actual_user_nickname = re.findall(r"\"(.*?)\"", itchat_msg["Content"])[0]
elif is_group and ("移出了群聊" in itchat_msg["Content"]):
self.ctype = ContextType.EXIT_GROUP
self.content = itchat_msg["Content"]
self.actual_user_nickname = re.findall(r"\"(.*?)\"", itchat_msg["Content"])[0]
elif "你已添加了" in itchat_msg["Content"]: #通过好友请求
self.ctype = ContextType.ACCEPT_FRIEND
self.content = itchat_msg["Content"]
elif "拍了拍我" in itchat_msg["Content"]:
elif any(note_patpat in itchat_msg["Content"] for note_patpat in notes_patpat): # 若有任何在notes_patpat列表中的字符串出现在NOTE中:
self.ctype = ContextType.PATPAT
self.content = itchat_msg["Content"]
if is_group:
self.actual_user_nickname = re.findall(r"\"(.*?)\"", itchat_msg["Content"])[0]
if "拍了拍我" in itchat_msg["Content"]: # 识别中文
self.actual_user_nickname = re.findall(r"\"(.*?)\"", itchat_msg["Content"])[0]
elif ("tickled my" in itchat_msg["Content"] or "tickled me" in itchat_msg["Content"]):
self.actual_user_nickname = re.findall(r'^(.*?)(?:tickled my|tickled me)', itchat_msg["Content"])[0]
else:
raise NotImplementedError("Unsupported note message: " + itchat_msg["Content"])
elif itchat_msg["Type"] == ATTACHMENT:
+6 -5
View File
@@ -162,11 +162,12 @@ class Query:
logger.debug("[wechatcom] receive message: {}, msg= {}".format(message, msg))
if msg.type == "event":
if msg.event == "subscribe":
reply_content = subscribe_msg()
if reply_content:
reply = create_reply(reply_content, msg).render()
res = channel.crypto.encrypt_message(reply, nonce, timestamp)
return res
pass
# reply_content = subscribe_msg()
# if reply_content:
# reply = create_reply(reply_content, msg).render()
# res = channel.crypto.encrypt_message(reply, nonce, timestamp)
# return res
else:
try:
wechatcom_msg = WechatComAppMessage(msg, client=channel.client)
+17 -2
View File
@@ -24,6 +24,7 @@ GPT35_0125 = "gpt-3.5-turbo-0125"
GPT35_1106 = "gpt-3.5-turbo-1106"
GPT_4o = "gpt-4o"
GPT_4O_0806 = "gpt-4o-2024-08-06"
GPT4_TURBO = "gpt-4-turbo"
GPT4_TURBO_PREVIEW = "gpt-4-turbo-preview"
GPT4_TURBO_04_09 = "gpt-4-turbo-2024-04-09"
@@ -37,6 +38,9 @@ GPT4_32k = "gpt-4-32k"
GPT4_06_13 = "gpt-4-0613"
GPT4_32k_06_13 = "gpt-4-32k-0613"
O1 = "o1-preview"
O1_MINI = "o1-mini"
WHISPER_1 = "whisper-1"
TTS_1 = "tts-1"
TTS_1_HD = "tts-1-hd"
@@ -56,11 +60,22 @@ GEMINI_PRO = "gemini-1.0-pro"
GEMINI_15_flash = "gemini-1.5-flash"
GEMINI_15_PRO = "gemini-1.5-pro"
GLM_4 = "glm-4"
GLM_4_PLUS = "glm-4-plus"
GLM_4_flash = "glm-4-flash"
GLM_4_LONG = "glm-4-long"
GLM_4_ALLTOOLS = "glm-4-alltools"
GLM_4_0520 = "glm-4-0520"
GLM_4_AIR = "glm-4-air"
GLM_4_AIRX = "glm-4-airx"
MODEL_LIST = [
GPT35, GPT35_0125, GPT35_1106, "gpt-3.5-turbo-16k",
GPT_4o, GPT_4o_MINI, GPT4_TURBO, GPT4_TURBO_PREVIEW, GPT4_TURBO_01_25, GPT4_TURBO_11_06, GPT4, GPT4_32k, GPT4_06_13, GPT4_32k_06_13,
O1, O1_MINI, GPT_4o, GPT_4O_0806, GPT_4o_MINI, GPT4_TURBO, GPT4_TURBO_PREVIEW, GPT4_TURBO_01_25, GPT4_TURBO_11_06, GPT4, GPT4_32k, GPT4_06_13, GPT4_32k_06_13,
WEN_XIN, WEN_XIN_4,
XUNFEI, ZHIPU_AI, MOONSHOT, MiniMax,
XUNFEI,
ZHIPU_AI, GLM_4, GLM_4_PLUS, GLM_4_flash, GLM_4_LONG, GLM_4_ALLTOOLS, GLM_4_0520, GLM_4_AIR, GLM_4_AIRX,
MOONSHOT, MiniMax,
GEMINI, GEMINI_PRO, GEMINI_15_flash, GEMINI_15_PRO,
"claude", "claude-3-haiku", "claude-3-sonnet", "claude-3-opus", "claude-3-opus-20240229", "claude-3.5-sonnet",
"moonshot-v1-8k", "moonshot-v1-32k", "moonshot-v1-128k",
+5
View File
@@ -42,8 +42,13 @@ class ChatClient(LinkAIClient):
if reply_voice_mode:
if reply_voice_mode == "voice_reply_voice":
local_config["voice_reply_voice"] = True
local_config["always_reply_voice"] = False
elif reply_voice_mode == "always_reply_voice":
local_config["always_reply_voice"] = True
local_config["voice_reply_voice"] = True
elif reply_voice_mode == "no_reply_voice":
local_config["always_reply_voice"] = False
local_config["voice_reply_voice"] = False
if config.get("admin_password"):
if not plugin_config.get("Godcmd"):
+2
View File
@@ -27,6 +27,7 @@ available_setting = {
"single_chat_reply_prefix": "[bot] ", # 私聊时自动回复的前缀,用于区分真人
"single_chat_reply_suffix": "", # 私聊时自动回复的后缀,\n 可以换行
"group_chat_prefix": ["@bot"], # 群聊时包含该前缀则会触发机器人回复
"no_need_at": False, # 群聊回复时是否不需要艾特
"group_chat_reply_prefix": "", # 群聊时自动回复的前缀
"group_chat_reply_suffix": "", # 群聊时自动回复的后缀,\n 可以换行
"group_chat_keyword": [], # 群聊时包含该关键词则会触发机器人回复
@@ -69,6 +70,7 @@ available_setting = {
"baidu_wenxin_model": "eb-instant", # 默认使用ERNIE-Bot-turbo模型
"baidu_wenxin_api_key": "", # Baidu api key
"baidu_wenxin_secret_key": "", # Baidu secret key
"baidu_wenxin_prompt_enabled": False, # Enable prompt if you are using ernie character model
# 讯飞星火API
"xunfei_app_id": "", # 讯飞应用ID
"xunfei_api_key": "", # 讯飞 API key
+2 -2
View File
@@ -99,8 +99,8 @@ class Role(Plugin):
if e_context["context"].type != ContextType.TEXT:
return
btype = Bridge().get_bot_type("chat")
if btype not in [const.OPEN_AI, const.CHATGPT, const.CHATGPTONAZURE, const.QWEN_DASHSCOPE, const.XUNFEI, const.BAIDU, const.ZHIPU_AI, const.MOONSHOT, const.MiniMax]:
logger.warn(f'不支持的bot: {btype}')
if btype not in [const.OPEN_AI, const.CHATGPT, const.CHATGPTONAZURE, const.QWEN_DASHSCOPE, const.XUNFEI, const.BAIDU, const.ZHIPU_AI, const.MOONSHOT, const.MiniMax, const.LINKAI]:
logger.debug(f'不支持的bot: {btype}')
return
bot = Bridge().get_bot("chat")
content = e_context["context"].content[:]
+192
View File
@@ -0,0 +1,192 @@
#!/usr/bin/env bash
set -e

# Color codes used for all status output below.
readonly RED='\033[0;31m'     # errors
readonly GREEN='\033[0;32m'   # success / progress
readonly YELLOW='\033[0;33m'  # warnings
readonly BLUE='\033[0;34m'    # usage highlights
readonly NC='\033[0m'         # reset (no color)

# Resolve the directory containing this script.
# Assign first, then export: `export VAR=$(cmd)` returns export's own status,
# so a failing command substitution would go unnoticed even under `set -e`.
# Also chain with `&&` so a failed `cd` cannot silently yield the caller's cwd.
BASE_DIR=$(cd "$(dirname "$0")" && pwd)
export BASE_DIR
echo -e "${GREEN}📁 BASE_DIR: ${BASE_DIR}${NC}"
# Abort with an error message unless config.json exists next to the script.
check_config_file() {
  local cfg="${BASE_DIR}/config.json"
  if [[ -f "$cfg" ]]; then
    return 0
  fi
  echo -e "${RED}❌ 错误:未找到 config.json 文件。请确保 config.json 存在于当前目录。${NC}"
  exit 1
}
# Verify that python3 (version >= 3.7) and pip are available; exit otherwise.
check_python_version() {
  if ! command -v python3 &> /dev/null; then
    echo -e "${RED}❌ 错误:未找到 Python3。请安装 Python 3.7 或以上版本。${NC}"
    exit 1
  fi

  local version major minor
  version=$(python3 -c 'import sys; print(f"{sys.version_info.major}.{sys.version_info.minor}")')
  # Split "major.minor" into its two components.
  IFS='.' read -r major minor <<< "$version"

  if (( major < 3 || (major == 3 && minor < 7) )); then
    echo -e "${RED}❌ 错误:Python 版本为 ${version}。请安装 Python 3.7 或以上版本。${NC}"
    exit 1
  fi

  if ! python3 -m pip --version &> /dev/null; then
    echo -e "${RED}❌ 错误:未找到 pip。请安装 pip。${NC}"
    exit 1
  fi
}
# Install the required (and, when present, optional) Python dependencies.
# Uses `python3 -m pip` instead of the bare `pip3` binary so packages are
# installed into the same interpreter that check_python_version validated
# (`pip3` may be missing or bound to a different Python installation).
install_dependencies() {
  echo -e "${YELLOW}⏳ 正在安装依赖...${NC}"
  if [ ! -f "${BASE_DIR}/requirements.txt" ]; then
    echo -e "${RED}❌ 错误:未找到 requirements.txt 文件。${NC}"
    exit 1
  fi
  # Install dependencies from requirements.txt via the Tsinghua PyPI mirror.
  python3 -m pip install -r "${BASE_DIR}/requirements.txt" -i https://pypi.tuna.tsinghua.edu.cn/simple
  # Install optional dependencies when requirements-optional.txt exists.
  if [ -f "${BASE_DIR}/requirements-optional.txt" ]; then
    echo -e "${YELLOW}⏳ 正在安装可选的依赖...${NC}"
    python3 -m pip install -r "${BASE_DIR}/requirements-optional.txt" -i https://pypi.tuna.tsinghua.edu.cn/simple
  fi
}
# Launch app.py in the background and stream its log so the user can scan
# the login QR code. Blocks on `tail -f` until interrupted.
run_project() {
  echo -e "${GREEN}🚀 准备启动项目...${NC}"
  cd "${BASE_DIR}"
  sleep 2

  local os_type
  os_type=$(uname)
  case "$os_type" in
    Linux)
      # Detach into its own session on Linux so the process survives the terminal.
      setsid python3 "${BASE_DIR}/app.py" > "${BASE_DIR}/nohup.out" 2>&1 &
      echo -e "${GREEN}🚀 正在启动 ChatGPT-on-WeChat (Linux)...${NC}"
      ;;
    Darwin)
      # setsid is unavailable on macOS; a plain background job suffices.
      python3 "${BASE_DIR}/app.py" > "${BASE_DIR}/nohup.out" 2>&1 &
      echo -e "${GREEN}🚀 正在启动 ChatGPT-on-WeChat (macOS)...${NC}"
      ;;
    *)
      echo -e "${RED}❌ 错误:不支持的操作系统 ${os_type}${NC}"
      exit 1
      ;;
  esac

  sleep 2
  # Show the most recent output and keep following it (blocks here).
  tail -n 30 -f "${BASE_DIR}/nohup.out"
}
# Stop the running instance, pull the latest code (falling back to the Gitee
# mirror when GitHub is unreachable), reinstall dependencies and restart.
update_project() {
  echo -e "${GREEN}🔄 准备更新项目,现在停止项目...${NC}"
  cd "${BASE_DIR}"

  # Stop the project before touching the working tree.
  stop_project

  echo -e "${GREEN}🔄 开始更新项目...${NC}"
  if [ ! -d .git ]; then
    echo -e "${RED}❌ 错误:当前目录不是 git 仓库,无法更新代码。${NC}"
    exit 1
  fi

  local pull_output
  # NOTE: the command substitution must sit inside the `if` condition.
  # A bare `out=$(git pull)` that fails would abort the whole script under
  # `set -e`, making the Gitee fallback below unreachable dead code.
  if pull_output=$(git pull); then
    if [[ "$pull_output" == *"Already up to date."* ]]; then
      echo -e "${GREEN}✅ 代码已经是最新的。${NC}"
    else
      echo -e "${GREEN}✅ 代码更新完成。${NC}"
    fi
  else
    echo -e "${YELLOW}⚠️ 从 GitHub 更新失败,尝试切换到 Gitee 仓库...${NC}"
    # Switch the origin remote to the Gitee mirror and retry once.
    git remote set-url origin https://gitee.com/zhayujie/chatgpt-on-wechat.git
    if pull_output=$(git pull); then
      if [[ "$pull_output" == *"Already up to date."* ]]; then
        echo -e "${GREEN}✅ 代码已经是最新的。${NC}"
      else
        echo -e "${GREEN}✅ 从 Gitee 更新成功。${NC}"
      fi
    else
      echo -e "${RED}❌ 错误:从 Gitee 更新仍然失败,请检查网络连接。${NC}"
      exit 1
    fi
  fi

  # Reinstall dependencies and bring the project back up.
  install_dependencies
  run_project
}
# Stop the running ChatGPT-on-WeChat instance(s): send SIGTERM, wait briefly,
# then escalate to SIGKILL for any process that is still alive.
stop_project() {
  echo -e "${GREEN}🛑 正在停止项目...${NC}"
  cd "${BASE_DIR}"
  local pids
  # Match only python3 processes running this checkout's app.py.
  pids=$(ps ax | grep -i app.py | grep "${BASE_DIR}" | grep python3 | grep -v grep | awk '{print $1}')
  if [ -z "$pids" ]; then
    echo -e "${YELLOW}⚠️ 未找到正在运行的 ChatGPT-on-WeChat。${NC}"
    return
  fi
  echo -e "${GREEN}🛑 正在运行的 ChatGPT-on-WeChat (PID: ${pids})${NC}"
  # Intentionally unquoted: word-splitting passes each PID as its own argument.
  kill ${pids}
  sleep 3
  local pid
  for pid in $pids; do
    # The original ran `ps -p $pid` with the whole (possibly multi-line) list,
    # which breaks when more than one matching process exists; check each PID.
    if ps -p "$pid" > /dev/null; then
      echo -e "${YELLOW}⚠️ 进程未停止,尝试强制终止...${NC}"
      kill -9 "$pid"
    fi
  done
  echo -e "${GREEN}✅ 已停止 ChatGPT-on-WeChat (PID: ${pids})${NC}"
}
# Entry point: dispatch on the first command-line argument.
# Unknown (or missing) arguments print the usage message and exit non-zero.
case "$1" in
  start)
    check_config_file
    check_python_version
    run_project
    ;;
  stop)
    stop_project
    ;;
  restart)
    # Equivalent to stop followed by start.
    stop_project
    check_config_file
    check_python_version
    run_project
    ;;
  update)
    # update_project itself stops, pulls, reinstalls and restarts.
    check_config_file
    check_python_version
    update_project
    ;;
  *)
    echo -e "${YELLOW}=========================================${NC}"
    echo -e "${YELLOW}用法:${GREEN}$0 ${BLUE}{start|stop|restart|update}${NC}"
    echo -e "${YELLOW}示例:${NC}"
    echo -e " ${GREEN}$0 ${BLUE}start${NC}"
    echo -e " ${GREEN}$0 ${BLUE}stop${NC}"
    echo -e " ${GREEN}$0 ${BLUE}restart${NC}"
    echo -e " ${GREEN}$0 ${BLUE}update${NC}"
    echo -e "${YELLOW}=========================================${NC}"
    exit 1
    ;;
esac
+1 -1
View File
@@ -65,7 +65,7 @@ class AzureVoice(Voice):
reply = Reply(ReplyType.TEXT, result.text)
else:
cancel_details = result.cancellation_details
logger.error("[Azure] voiceToText error, result={}, errordetails={}".format(result, cancel_details.error_details))
logger.error("[Azure] voiceToText error, result={}, errordetails={}".format(result, cancel_details))
reply = Reply(ReplyType.ERROR, "抱歉,语音识别失败")
return reply