Compare commits

..

29 Commits

Author SHA1 Message Date
lanvent ee91c86a29 Update README.md 2023-04-10 14:52:06 +08:00
lanvent 48c08f4aad unset default timeout 2023-04-10 14:50:34 +08:00
lanvent fceabb8e67 Merge Pull Request #787 into master 2023-04-09 20:11:21 +08:00
lanvent fcfafb05f1 fix: wechatmp's deadloop when reply is None from @JS00000 #789 2023-04-09 20:01:03 +08:00
lanvent f1e8344beb fix: no old signal handler 2023-04-09 19:15:28 +08:00
yubai 89e8f385b4 bugfix for azure chatgpt adapting 2023-04-09 18:00:05 +08:00
lanvent bf4ae9a051 fix: create tmpdir 2023-04-09 17:37:19 +08:00
lanvent 6bd1242d43 chore: update requirements and config-template 2023-04-09 16:16:54 +08:00
lanvent 8779eab36b feat: itchat support picture msg 2023-04-09 00:45:42 +08:00
lanvent 3174b1158c chore: merge itchat msg 2023-04-08 23:32:37 +08:00
lanvent 18740093d1 Merge branch 'master' of https://github.com/zhayujie/chatgpt-on-wechat into master-dev 2023-04-08 01:25:59 +08:00
lanvent 8c7d1d4010 Merge Pull Request #774 into master 2023-04-08 01:23:54 +08:00
lanvent 8c48a27e1a Merge branch 'master' of https://github.com/zhayujie/chatgpt-on-wechat into master-dev 2023-04-07 23:42:30 +08:00
lanvent 4278d2b8ef feat: add updatep command 2023-04-07 23:31:07 +08:00
lanvent 3a3affd3ec fix: wechatmp event and query timeout 2023-04-07 20:53:21 +08:00
JS00000 45d72b8b9b Update README 2023-04-07 20:46:00 +08:00
JS00000 03b908c079 Merge branch 'master' into wechatmp 2023-04-07 20:28:08 +08:00
JS00000 d35d01f980 Add wechatmp_service channel 2023-04-07 19:47:50 +08:00
Jianglang 9c208ffa2c Update README.md 2023-04-07 18:29:16 +08:00
lanvent bea4416f12 fix: wechatmp subscribe event 2023-04-07 18:23:52 +08:00
lanvent 2ea8b4ef73 fix: chat when single_chat_prefix is None 2023-04-07 16:30:38 +08:00
lanvent e6946ef989 modify default value of concurrency_in_session 2023-04-07 16:03:59 +08:00
lanvent 9aeb60f66d feat: add replicate to source.json 2023-04-07 15:15:40 +08:00
lanvent d687f9329e fix: add maxsplit=1 in wechatmp 2023-04-07 12:28:01 +08:00
lanvent 3207258fd9 fix: check duplicate in wechatmp 2023-04-07 12:22:24 +08:00
lanvent d8b75206fe feat: maxmize message length 2023-04-07 12:15:29 +08:00
lanvent 88e8dd5162 chroe: specify necessary property in chatmessage 2023-04-07 01:22:30 +08:00
lanvent c9306633b2 fix: read source.json with utf-8 2023-04-07 01:15:31 +08:00
Jianglang c50d1cc99d Update README.md 2023-04-07 01:09:16 +08:00
27 changed files with 546 additions and 294 deletions
+1
View File
@@ -1,5 +1,6 @@
.DS_Store
.idea
.vscode
.wechaty/
__pycache__/
venv*
+5 -1
View File
@@ -13,10 +13,14 @@
- [x] **语音识别:** 支持接收和处理语音消息,通过文字或语音回复
- [x] **插件化:** 支持个性化功能插件,提供角色扮演、文字冒险游戏等预设插件
> 快速部署:
> 目前支持微信和微信个人号部署,欢迎接入更多应用,参考[`Terminal`代码](https://github.com/zhayujie/chatgpt-on-wechat/blob/master/channel/terminal/terminal_channel.py)实现接收和发送消息逻辑即可接入。
快速部署:
>
>[![Deploy on Railway](https://railway.app/button.svg)](https://railway.app/template/qApznZ?referralCode=RC3znh)
# 更新日志
>**2023.04.05** 支持微信个人号部署,兼容角色扮演等预设插件,[使用文档](https://github.com/zhayujie/chatgpt-on-wechat/blob/master/channel/wechatmp/README.md)。(contributed by [@JS00000](https://github.com/JS00000) in [#686](https://github.com/zhayujie/chatgpt-on-wechat/pull/686))
+3 -2
View File
@@ -13,7 +13,8 @@ def sigterm_handler_wrap(_signo):
def func(_signo, _stack_frame):
logger.info("signal {} received, exiting...".format(_signo))
conf().save_user_datas()
return old_handler(_signo, _stack_frame)
if callable(old_handler): # check old_handler
return old_handler(_signo, _stack_frame)
signal.signal(_signo, func)
def run():
@@ -36,7 +37,7 @@ def run():
# os.environ['WECHATY_PUPPET_SERVICE_ENDPOINT'] = '127.0.0.1:9001'
channel = channel_factory.create_channel(channel_name)
if channel_name in ['wx','wxy','wechatmp','terminal']:
if channel_name in ['wx','wxy','terminal','wechatmp','wechatmp_service']:
PluginManager().load_plugins()
# startup channel
+7 -7
View File
@@ -3,13 +3,12 @@
from bot.bot import Bot
from bot.chatgpt.chat_gpt_session import ChatGPTSession
from bot.openai.open_ai_image import OpenAIImage
from bot.session_manager import Session, SessionManager
from bot.session_manager import SessionManager
from bridge.context import ContextType
from bridge.reply import Reply, ReplyType
from config import conf, load_config
from common.log import logger
from common.token_bucket import TokenBucket
from common.expired_dict import ExpiredDict
import openai
import openai.error
import time
@@ -91,8 +90,8 @@ class ChatGPTBot(Bot,OpenAIImage):
"top_p":1,
"frequency_penalty":conf().get('frequency_penalty', 0.0), # [-2,2]之间,该值越大则更倾向于产生不同的内容
"presence_penalty":conf().get('presence_penalty', 0.0), # [-2,2]之间,该值越大则更倾向于产生不同的内容
"request_timeout": conf().get('request_timeout', 60), # 请求超时时间,openai接口默认设置为600,对于难问题一般需要较长时间
"timeout": conf().get('request_timeout', 120), #重试超时时间,在这个时间内,将会自动重试
"request_timeout": conf().get('request_timeout', None), # 请求超时时间,openai接口默认设置为600,对于难问题一般需要较长时间
"timeout": conf().get('request_timeout', None), #重试超时时间,在这个时间内,将会自动重试
}
def reply_text(self, session:ChatGPTSession, session_id, api_key, retry_count=0) -> dict:
@@ -151,6 +150,7 @@ class AzureChatGPTBot(ChatGPTBot):
def compose_args(self):
args = super().compose_args()
args["engine"] = args["model"]
del(args["model"])
return args
args["deployment_id"] = conf().get("azure_deployment_id")
#args["engine"] = args["model"]
#del(args["model"])
return args
+2 -2
View File
@@ -55,7 +55,7 @@ def num_tokens_from_messages(messages, model):
except KeyError:
logger.debug("Warning: model not found. Using cl100k_base encoding.")
encoding = tiktoken.get_encoding("cl100k_base")
if model == "gpt-3.5-turbo":
if model == "gpt-3.5-turbo" or model == "gpt-35-turbo":
return num_tokens_from_messages(messages, model="gpt-3.5-turbo-0301")
elif model == "gpt-4":
return num_tokens_from_messages(messages, model="gpt-4-0314")
@@ -76,4 +76,4 @@ def num_tokens_from_messages(messages, model):
if key == "name":
num_tokens += tokens_per_name
num_tokens += 3 # every reply is primed with <|start|>assistant<|message|>
return num_tokens
return num_tokens
+1 -1
View File
@@ -19,7 +19,7 @@ class Bridge(object):
model_type = conf().get("model")
if model_type in ["text-davinci-003"]:
self.btype['chat'] = const.OPEN_AI
if conf().get("use_azure_chatgpt"):
if conf().get("use_azure_chatgpt", False):
self.btype['chat'] = const.CHATGPTONAZURE
self.bots={}
+2 -1
View File
@@ -5,7 +5,8 @@ from enum import Enum
class ContextType (Enum):
TEXT = 1 # 文本消息
VOICE = 2 # 音频消息
IMAGE_CREATE = 3 # 创建图片命令
IMAGE = 3 # 图片消息
IMAGE_CREATE = 10 # 创建图片命令
def __str__(self):
return self.name
+4 -1
View File
@@ -19,5 +19,8 @@ def create_channel(channel_type):
return TerminalChannel()
elif channel_type == 'wechatmp':
from channel.wechatmp.wechatmp_channel import WechatMPChannel
return WechatMPChannel()
return WechatMPChannel(passive_reply = True)
elif channel_type == 'wechatmp_service':
from channel.wechatmp.wechatmp_channel import WechatMPChannel
return WechatMPChannel(passive_reply = False)
raise RuntimeError
+17 -5
View File
@@ -97,7 +97,7 @@ class ChatChannel(Channel):
logger.info("[WX]receive group voice, but checkprefix didn't match")
return None
else: # 单聊
match_prefix = check_prefix(content, conf().get('single_chat_prefix'))
match_prefix = check_prefix(content, conf().get('single_chat_prefix',['']))
if match_prefix is not None: # 判断如果匹配到自定义前缀,则返回过滤掉前缀+空格后的内容
content = content.replace(match_prefix, '', 1).strip()
elif context["origin_ctype"] == ContextType.VOICE: # 如果源消息是私聊的语音消息,允许不匹配前缀,放宽条件
@@ -170,6 +170,8 @@ class ChatChannel(Channel):
reply = self._generate_reply(new_context)
else:
return
elif context.type == ContextType.IMAGE: # 图片消息,当前无默认逻辑
pass
else:
logger.error('[WX] unknown context type: {}'.format(context.type))
return
@@ -231,12 +233,20 @@ class ChatChannel(Channel):
time.sleep(3+3*retry_cnt)
self._send(reply, context, retry_cnt+1)
def thread_pool_callback(self, session_id):
def _success_callback(self, session_id, **kwargs):# 线程正常结束时的回调函数
logger.debug("Worker return success, session_id = {}".format(session_id))
def _fail_callback(self, session_id, exception, **kwargs): # 线程异常结束时的回调函数
logger.exception("Worker return exception: {}".format(exception))
def _thread_pool_callback(self, session_id, **kwargs):
def func(worker:Future):
try:
worker_exception = worker.exception()
if worker_exception:
logger.exception("Worker return exception: {}".format(worker_exception))
self._fail_callback(session_id, exception = worker_exception, **kwargs)
else:
self._success_callback(session_id, **kwargs)
except CancelledError as e:
logger.info("Worker cancelled, session_id = {}".format(session_id))
except Exception as e:
@@ -249,7 +259,7 @@ class ChatChannel(Channel):
session_id = context['session_id']
with self.lock:
if session_id not in self.sessions:
self.sessions[session_id] = [Dequeue(), threading.BoundedSemaphore(conf().get("concurrency_in_session", 1))]
self.sessions[session_id] = [Dequeue(), threading.BoundedSemaphore(conf().get("concurrency_in_session", 4))]
if context.type == ContextType.TEXT and context.content.startswith("#"):
self.sessions[session_id][0].putleft(context) # 优先处理管理命令
else:
@@ -267,7 +277,7 @@ class ChatChannel(Channel):
context = context_queue.get()
logger.debug("[WX] consume context: {}".format(context))
future:Future = self.handler_pool.submit(self._handle, context)
future.add_done_callback(self.thread_pool_callback(session_id))
future.add_done_callback(self._thread_pool_callback(session_id, context = context))
if session_id not in self.futures:
self.futures[session_id] = []
self.futures[session_id].append(future)
@@ -302,6 +312,8 @@ class ChatChannel(Channel):
def check_prefix(content, prefix_list):
if not prefix_list:
return None
for prefix in prefix_list:
if content.startswith(prefix):
return prefix
+12 -10
View File
@@ -1,27 +1,29 @@
"""
本类表示聊天消息,用于对itchat和wechaty的消息进行统一的封装
本类表示聊天消息,用于对itchat和wechaty的消息进行统一的封装
填好必填项(群聊6个,非群聊8个),即可接入ChatChannel,并支持插件,参考TerminalChannel
ChatMessage
msg_id: 消息id
msg_id: 消息id (必填)
create_time: 消息创建时间
ctype: 消息类型 : ContextType
content: 消息内容, 如果是声音/图片,这里是文件路径
ctype: 消息类型 : ContextType (必填)
content: 消息内容, 如果是声音/图片,这里是文件路径 (必填)
from_user_id: 发送者id
from_user_id: 发送者id (必填)
from_user_nickname: 发送者昵称
to_user_id: 接收者id
to_user_id: 接收者id (必填)
to_user_nickname: 接收者昵称
other_user_id: 对方的id,如果你是发送者,那这个就是接收者id,如果你是接收者,那这个就是发送者id,如果是群消息,那这一直是群id
other_user_id: 对方的id,如果你是发送者,那这个就是接收者id,如果你是接收者,那这个就是发送者id,如果是群消息,那这一直是群id (必填)
other_user_nickname: 同上
is_group: 是否是群消息
is_at: 是否被at
is_group: 是否是群消息 (群聊必填)
is_at: 是否被at
- (群消息时,一般会存在实际发送者,是群内某个成员的id和昵称,下列项仅在群消息时存在)
actual_user_id: 实际发送者id
actual_user_id: 实际发送者id (群聊必填)
actual_user_nickname:实际发送者昵称
+29 -39
View File
@@ -23,26 +23,21 @@ from common.time_check import time_checker
from common.expired_dict import ExpiredDict
from plugins import *
@itchat.msg_register(TEXT)
@itchat.msg_register([TEXT,VOICE,PICTURE])
def handler_single_msg(msg):
WechatChannel().handle_text(WeChatMessage(msg))
# logger.debug("handler_single_msg: {}".format(msg))
if msg['Type'] == PICTURE and msg['MsgType'] == 47:
return None
WechatChannel().handle_single(WeChatMessage(msg))
return None
@itchat.msg_register(TEXT, isGroupChat=True)
@itchat.msg_register([TEXT,VOICE,PICTURE], isGroupChat=True)
def handler_group_msg(msg):
if msg['Type'] == PICTURE and msg['MsgType'] == 47:
return None
WechatChannel().handle_group(WeChatMessage(msg,True))
return None
@itchat.msg_register(VOICE)
def handler_single_voice(msg):
WechatChannel().handle_voice(WeChatMessage(msg))
return None
@itchat.msg_register(VOICE, isGroupChat=True)
def handler_group_voice(msg):
WechatChannel().handle_group_voice(WeChatMessage(msg,True))
return None
def _check(func):
def wrapper(self, cmsg: ChatMessage):
msgId = cmsg.msg_id
@@ -118,7 +113,7 @@ class WechatChannel(ChatChannel):
# start message listener
itchat.run()
# handle_* 系列函数处理收到的消息后构造Context,然后传入_handle函数中处理Context和发送回复
# handle_* 系列函数处理收到的消息后构造Context,然后传入produce函数中处理Context和发送回复
# Context包含了消息的所有信息,包括以下属性
# type 消息类型, 包括TEXT、VOICE、IMAGE_CREATE
# content 消息内容,如果是TEXT类型,content就是文本内容,如果是VOICE类型,content就是语音文件名,如果是IMAGE_CREATE类型,content就是图片生成命令
@@ -132,37 +127,32 @@ class WechatChannel(ChatChannel):
@time_checker
@_check
def handle_voice(self, cmsg : ChatMessage):
if conf().get('speech_recognition') != True:
return
logger.debug("[WX]receive voice msg: {}".format(cmsg.content))
context = self._compose_context(ContextType.VOICE, cmsg.content, isgroup=False, msg=cmsg)
if context:
self.produce(context)
@time_checker
@_check
def handle_text(self, cmsg : ChatMessage):
logger.debug("[WX]receive text msg: {}, cmsg={}".format(json.dumps(cmsg._rawmsg, ensure_ascii=False), cmsg))
context = self._compose_context(ContextType.TEXT, cmsg.content, isgroup=False, msg=cmsg)
def handle_single(self, cmsg : ChatMessage):
if cmsg.ctype == ContextType.VOICE:
if conf().get('speech_recognition') != True:
return
logger.debug("[WX]receive voice msg: {}".format(cmsg.content))
elif cmsg.ctype == ContextType.IMAGE:
logger.debug("[WX]receive image msg: {}".format(cmsg.content))
else:
logger.debug("[WX]receive text msg: {}, cmsg={}".format(json.dumps(cmsg._rawmsg, ensure_ascii=False), cmsg))
context = self._compose_context(cmsg.ctype, cmsg.content, isgroup=False, msg=cmsg)
if context:
self.produce(context)
@time_checker
@_check
def handle_group(self, cmsg : ChatMessage):
logger.debug("[WX]receive group msg: {}, cmsg={}".format(json.dumps(cmsg._rawmsg, ensure_ascii=False), cmsg))
context = self._compose_context(ContextType.TEXT, cmsg.content, isgroup=True, msg=cmsg)
if context:
self.produce(context)
@time_checker
@_check
def handle_group_voice(self, cmsg : ChatMessage):
if conf().get('group_speech_recognition', False) != True:
return
logger.debug("[WX]receive voice for group msg: {}".format(cmsg.content))
context = self._compose_context(ContextType.VOICE, cmsg.content, isgroup=True, msg=cmsg)
if cmsg.ctype == ContextType.VOICE:
if conf().get('speech_recognition') != True:
return
logger.debug("[WX]receive voice for group msg: {}".format(cmsg.content))
elif cmsg.ctype == ContextType.IMAGE:
logger.debug("[WX]receive image for group msg: {}".format(cmsg.content))
else:
# logger.debug("[WX]receive group msg: {}, cmsg={}".format(json.dumps(cmsg._rawmsg, ensure_ascii=False), cmsg))
pass
context = self._compose_context(cmsg.ctype, cmsg.content, isgroup=True, msg=cmsg)
if context:
self.produce(context)
+4
View File
@@ -22,6 +22,10 @@ class WeChatMessage(ChatMessage):
self.ctype = ContextType.VOICE
self.content = TmpDir().path() + itchat_msg['FileName'] # content直接存临时目录路径
self._prepare_fn = lambda: itchat_msg.download(self.content)
elif itchat_msg['Type'] == PICTURE and itchat_msg['MsgType'] == 3:
self.ctype = ContextType.IMAGE
self.content = TmpDir().path() + itchat_msg['FileName'] # content直接存临时目录路径
self._prepare_fn = lambda: itchat_msg.download(self.content)
else:
raise NotImplementedError("Unsupported message type: {}".format(itchat_msg['Type']))
+17 -9
View File
@@ -1,10 +1,11 @@
# 个人微信公众号channel
# 微信公众号channel
鉴于个人微信号在服务器上通过itchat登录有封号风险,这里新增了个人微信公众号channel,提供无风险的服务。
但是由于个人微信公众号的众多接口限制,目前支持的功能有限,实现简陋,提供了一个最基本的文本对话服务,支持加载插件,优化了命令格式,支持私有api_key。暂未实现图片输入输出、语音输入输出等交互形式
如有公众号是企业主体且可以通过微信认证,即可获得更多接口,解除大多数限制。欢迎大家提供更多的支持
鉴于个人微信号在服务器上通过itchat登录有封号风险,这里新增了微信公众号channel,提供无风险的服务。
目前支持订阅号(个人)和服务号(企业)两种类型的公众号,它们的主要区别就是被动回复和主动回复
个人微信订阅号有许多接口限制,目前仅支持最基本的文本对话和语音输入,支持加载插件,支持私有api_key
暂未实现图片输入输出、语音输出等交互形式。
## 使用方法
## 使用方法(订阅号,服务号类似)
在开始部署前,你需要一个拥有公网IP的服务器,以提供微信服务器和我们自己服务器的连接。或者你需要进行内网穿透,否则微信服务器无法将消息发送给我们的服务器。
@@ -21,8 +22,10 @@ pip3 install web.py
相关的服务器验证代码已经写好,你不需要再添加任何代码。你只需要在本项目根目录的`config.json`中添加
```
"channel_type": "wechatmp",
"wechatmp_token": "your Token",
"wechatmp_port": 8080,
"wechatmp_token": "Token", # 微信公众平台的Token
"wechatmp_port": 8080, # 微信公众平台的端口,需要端口转发到80或443
"wechatmp_app_id": "", # 微信公众平台的appID,仅服务号需要
"wechatmp_app_secret": "", # 微信公众平台的appsecret,仅服务号需要
```
然后运行`python3 app.py`启动web服务器。这里会默认监听8080端口,但是微信公众号的服务器配置只支持80/443端口,有两种方法来解决这个问题。第一个是推荐的方法,使用端口转发命令将80端口转发到8080端口(443同理,注意需要支持SSL,也就是https的访问,在`wechatmp_channel.py`需要修改相应的证书路径):
```
@@ -35,7 +38,7 @@ sudo iptables-save > /etc/iptables/rules.v4
随后在[微信公众平台](https://mp.weixin.qq.com)启用服务器,关闭手动填写规则的自动回复,即可实现ChatGPT的自动回复。
## 个人微信公众号的限制
由于目前测试的公众号不是企业主体,所以没有客服接口,因此公众号无法主动发出消息,只能被动回复。而微信官方对被动回复有5秒的时间限制,最多重试2次,因此最多只有15秒的自动回复时间窗口。因此如果问题比较复杂或者我们的服务器比较忙,ChatGPT的回答就没办法及时回复给用户。为了解决这个问题,这里做了回答缓存,它需要你在回复超时后,再次主动发送任意文字(例如1)来尝试拿到回答缓存。为了优化使用体验,目前设置了两分钟(120秒)的timeout,用户在至多两分钟后即可得到查询到回复或者错误原因。
由于人微信公众号不能通过微信认证,所以没有客服接口,因此公众号无法主动发出消息,只能被动回复。而微信官方对被动回复有5秒的时间限制,最多重试2次,因此最多只有15秒的自动回复时间窗口。因此如果问题比较复杂或者我们的服务器比较忙,ChatGPT的回答就没办法及时回复给用户。为了解决这个问题,这里做了回答缓存,它需要你在回复超时后,再次主动发送任意文字(例如1)来尝试拿到回答缓存。为了优化使用体验,目前设置了两分钟(120秒)的timeout,用户在至多两分钟后即可得到查询到回复或者错误原因。
另外,由于微信官方的限制,自动回复有长度限制。因此这里将ChatGPT的回答拆分,分成每段600字回复(限制大约在700字)。
@@ -43,4 +46,9 @@ sudo iptables-save > /etc/iptables/rules.v4
公共api有访问频率限制(免费账号每分钟最多20次ChatGPT的API调用),这在服务多人的时候会遇到问题。因此这里多加了一个设置私有api_key的功能。目前通过godcmd插件的命令来设置私有api_key。
## 测试范围
目前在`RoboStyle`这个公众号上进行了测试,感兴趣的可以关注并体验。开启了godcmd, Banwords, role, dungeon, finish这五个插件,其他的插件还没有测试。百度的接口暂未测试。语音对话没有测试。图片直接以链接形式回复(没有临时素材上传接口的权限)。
目前在`RoboStyle`这个公众号上进行了测试(基于[wechatmp-stable分支](https://github.com/JS00000/chatgpt-on-wechat/tree/wechatmp-stable),而[master分支](https://github.com/zhayujie/chatgpt-on-wechat)含有最新功能,但是稳定性有待测试),感兴趣的可以关注并体验。开启了godcmd, Banwords, role, dungeon, finish这五个插件,其他的插件还没有测试。百度的接口暂未测试。语音对话没有测试。图片直接以链接形式回复(没有临时素材上传接口的权限)。
## TODO
* 服务号交互完善
* 服务号使用临时素材接口,提供图片回复能力
* 插件测试
+51
View File
@@ -0,0 +1,51 @@
import web
import time
import channel.wechatmp.reply as reply
import channel.wechatmp.receive as receive
from config import conf
from common.log import logger
from bridge.context import *
from channel.wechatmp.common import *
from channel.wechatmp.wechatmp_channel import WechatMPChannel
# This class is instantiated once per query
class Query():
def GET(self):
return verify_server(web.input())
def POST(self):
# Make sure to return the instance that first created, @singleton will do that.
channel = WechatMPChannel()
try:
webData = web.data()
# logger.debug("[wechatmp] Receive request:\n" + webData.decode("utf-8"))
wechatmp_msg = receive.parse_xml(webData)
if wechatmp_msg.msg_type == 'text':
from_user = wechatmp_msg.from_user_id
message = wechatmp_msg.content.decode("utf-8")
message_id = wechatmp_msg.msg_id
logger.info("[wechatmp] {}:{} Receive post query {} {}: {}".format(web.ctx.env.get('REMOTE_ADDR'), web.ctx.env.get('REMOTE_PORT'), from_user, message_id, message))
context = channel._compose_context(ContextType.TEXT, message, isgroup=False, msg=wechatmp_msg)
if context:
# set private openai_api_key
# if from_user is not changed in itchat, this can be placed at chat_channel
user_data = conf().get_user_data(from_user)
context['openai_api_key'] = user_data.get('openai_api_key') # None or user openai_api_key
channel.produce(context)
# The reply will be sent by channel.send() in another thread
return "success"
elif wechatmp_msg.msg_type == 'event':
logger.info("[wechatmp] Event {} from {}".format(wechatmp_msg.Event, wechatmp_msg.from_user_id))
content = subscribe_msg()
replyMsg = reply.TextMsg(wechatmp_msg.from_user_id, wechatmp_msg.to_user_id, content)
return replyMsg.send()
else:
logger.info("暂且不处理")
return "success"
except Exception as exc:
logger.exception(exc)
return exc
+172
View File
@@ -0,0 +1,172 @@
import web
import time
import channel.wechatmp.reply as reply
import channel.wechatmp.receive as receive
from config import conf
from common.log import logger
from bridge.context import *
from channel.wechatmp.common import *
from channel.wechatmp.wechatmp_channel import WechatMPChannel
# This class is instantiated once per query
class Query():
def GET(self):
return verify_server(web.input())
def POST(self):
# Make sure to return the instance that first created, @singleton will do that.
channel = WechatMPChannel()
try:
query_time = time.time()
webData = web.data()
logger.debug("[wechatmp] Receive request:\n" + webData.decode("utf-8"))
wechatmp_msg = receive.parse_xml(webData)
if wechatmp_msg.msg_type == 'text':
from_user = wechatmp_msg.from_user_id
to_user = wechatmp_msg.to_user_id
message = wechatmp_msg.content.decode("utf-8")
message_id = wechatmp_msg.msg_id
logger.info("[wechatmp] {}:{} Receive post query {} {}: {}".format(web.ctx.env.get('REMOTE_ADDR'), web.ctx.env.get('REMOTE_PORT'), from_user, message_id, message))
supported = True
if "【收到不支持的消息类型,暂无法显示】" in message:
supported = False # not supported, used to refresh
cache_key = from_user
reply_text = ""
# New request
if cache_key not in channel.cache_dict and cache_key not in channel.running:
# The first query begin, reset the cache
context = channel._compose_context(ContextType.TEXT, message, isgroup=False, msg=wechatmp_msg)
logger.debug("[wechatmp] context: {} {}".format(context, wechatmp_msg))
if message_id in channel.received_msgs: # received and finished
# no return because of bandwords or other reasons
return "success"
if supported and context:
# set private openai_api_key
# if from_user is not changed in itchat, this can be placed at chat_channel
user_data = conf().get_user_data(from_user)
context['openai_api_key'] = user_data.get('openai_api_key') # None or user openai_api_key
channel.received_msgs[message_id] = wechatmp_msg
channel.running.add(cache_key)
channel.produce(context)
else:
trigger_prefix = conf().get('single_chat_prefix',[''])[0]
if trigger_prefix or not supported:
if trigger_prefix:
content = textwrap.dedent(f"""\
请输入'{trigger_prefix}'接你想说的话跟我说话。
例如:
{trigger_prefix}你好,很高兴见到你。""")
else:
content = textwrap.dedent("""\
你好,很高兴见到你。
请跟我说话吧。""")
else:
logger.error(f"[wechatmp] unknown error")
content = textwrap.dedent("""\
未知错误,请稍后再试""")
replyMsg = reply.TextMsg(wechatmp_msg.from_user_id, wechatmp_msg.to_user_id, content)
return replyMsg.send()
channel.query1[cache_key] = False
channel.query2[cache_key] = False
channel.query3[cache_key] = False
# User request again, and the answer is not ready
elif cache_key in channel.running and channel.query1.get(cache_key) == True and channel.query2.get(cache_key) == True and channel.query3.get(cache_key) == True:
channel.query1[cache_key] = False #To improve waiting experience, this can be set to True.
channel.query2[cache_key] = False #To improve waiting experience, this can be set to True.
channel.query3[cache_key] = False
# User request again, and the answer is ready
elif cache_key in channel.cache_dict:
# Skip the waiting phase
channel.query1[cache_key] = True
channel.query2[cache_key] = True
channel.query3[cache_key] = True
assert not (cache_key in channel.cache_dict and cache_key in channel.running)
if channel.query1.get(cache_key) == False:
# The first query from wechat official server
logger.debug("[wechatmp] query1 {}".format(cache_key))
channel.query1[cache_key] = True
cnt = 0
while cache_key in channel.running and cnt < 45:
cnt = cnt + 1
time.sleep(0.1)
if cnt == 45:
# waiting for timeout (the POST query will be closed by wechat official server)
time.sleep(1)
# and do nothing
return
else:
pass
elif channel.query2.get(cache_key) == False:
# The second query from wechat official server
logger.debug("[wechatmp] query2 {}".format(cache_key))
channel.query2[cache_key] = True
cnt = 0
while cache_key in channel.running and cnt < 45:
cnt = cnt + 1
time.sleep(0.1)
if cnt == 45:
# waiting for timeout (the POST query will be closed by wechat official server)
time.sleep(1)
# and do nothing
return
else:
pass
elif channel.query3.get(cache_key) == False:
# The third query from wechat official server
logger.debug("[wechatmp] query3 {}".format(cache_key))
channel.query3[cache_key] = True
cnt = 0
while cache_key in channel.running and cnt < 40:
cnt = cnt + 1
time.sleep(0.1)
if cnt == 40:
# Have waiting for 3x5 seconds
# return timeout message
reply_text = "【正在思考中,回复任意文字尝试获取回复】"
logger.info("[wechatmp] Three queries has finished For {}: {}".format(from_user, message_id))
replyPost = reply.TextMsg(from_user, to_user, reply_text).send()
return replyPost
else:
pass
if cache_key not in channel.cache_dict and cache_key not in channel.running:
# no return because of bandwords or other reasons
return "success"
# if float(time.time()) - float(query_time) > 4.8:
# reply_text = "【正在思考中,回复任意文字尝试获取回复】"
# logger.info("[wechatmp] Timeout for {} {}, return".format(from_user, message_id))
# replyPost = reply.TextMsg(from_user, to_user, reply_text).send()
# return replyPost
if cache_key in channel.cache_dict:
content = channel.cache_dict[cache_key]
if len(content.encode('utf8'))<=MAX_UTF8_LEN:
reply_text = channel.cache_dict[cache_key]
channel.cache_dict.pop(cache_key)
else:
continue_text = "\n【未完待续,回复任意文字以继续】"
splits = split_string_by_utf8_length(content, MAX_UTF8_LEN - len(continue_text.encode('utf-8')), max_split= 1)
reply_text = splits[0] + continue_text
channel.cache_dict[cache_key] = splits[1]
logger.info("[wechatmp] {}:{} Do send {}".format(web.ctx.env.get('REMOTE_ADDR'), web.ctx.env.get('REMOTE_PORT'), reply_text))
replyPost = reply.TextMsg(from_user, to_user, reply_text).send()
return replyPost
elif wechatmp_msg.msg_type == 'event':
logger.info("[wechatmp] Event {} from {}".format(wechatmp_msg.content, wechatmp_msg.from_user_id))
content = subscribe_msg()
replyMsg = reply.TextMsg(wechatmp_msg.from_user_id, wechatmp_msg.to_user_id, content)
return replyMsg.send()
else:
logger.info("暂且不处理")
return "success"
except Exception as exc:
logger.exception(exc)
return exc
+63
View File
@@ -0,0 +1,63 @@
from config import conf
import hashlib
import textwrap
MAX_UTF8_LEN = 2048
class WeChatAPIException(Exception):
pass
def verify_server(data):
try:
if len(data) == 0:
return "None"
signature = data.signature
timestamp = data.timestamp
nonce = data.nonce
echostr = data.echostr
token = conf().get('wechatmp_token') #请按照公众平台官网\基本配置中信息填写
data_list = [token, timestamp, nonce]
data_list.sort()
sha1 = hashlib.sha1()
# map(sha1.update, data_list) #python2
sha1.update("".join(data_list).encode('utf-8'))
hashcode = sha1.hexdigest()
print("handle/GET func: hashcode, signature: ", hashcode, signature)
if hashcode == signature:
return echostr
else:
return ""
except Exception as Argument:
return Argument
def subscribe_msg():
trigger_prefix = conf().get('single_chat_prefix',[''])[0]
msg = textwrap.dedent(f"""\
感谢您的关注!
这里是ChatGPT,可以自由对话。
资源有限,回复较慢,请勿着急。
支持通用表情输入。
暂时不支持图片输入。
支持图片输出,画字开头的问题将回复图片链接。
支持角色扮演和文字冒险两种定制模式对话。
输入'{trigger_prefix}#帮助' 查看详细指令。""")
return msg
def split_string_by_utf8_length(string, max_length, max_split=0):
encoded = string.encode('utf-8')
start, end = 0, 0
result = []
while end < len(encoded):
if max_split > 0 and len(result) >= max_split:
result.append(encoded[start:].decode('utf-8'))
break
end = start + max_length
# 如果当前字节不是 UTF-8 编码的开始字节,则向前查找直到找到开始字节为止
while end < len(encoded) and (encoded[end] & 0b11000000) == 0b10000000:
end -= 1
result.append(encoded[start:end].decode('utf-8'))
start = end
return result
+5 -2
View File
@@ -19,7 +19,10 @@ class WeChatMPMessage(ChatMessage):
self.from_user_id = xmlData.find('FromUserName').text
self.create_time = xmlData.find('CreateTime').text
self.msg_type = xmlData.find('MsgType').text
self.msg_id = xmlData.find('MsgId').text
try:
self.msg_id = xmlData.find('MsgId').text
except:
self.msg_id = self.from_user_id+self.create_time
self.is_group = False
# reply to other_user_id
@@ -36,7 +39,7 @@ class WeChatMPMessage(ChatMessage):
self.pic_url = xmlData.find('PicUrl').text
self.media_id = xmlData.find('MediaId').text
elif self.msg_type == 'event':
self.event = xmlData.find('Event').text
self.content = xmlData.find('Event').text
else: # video, shortvideo, location, link
# not implemented
pass
+94 -199
View File
@@ -1,19 +1,17 @@
# -*- coding: utf-8 -*-
import web
import time
import math
import hashlib
import textwrap
from channel.chat_channel import ChatChannel
import channel.wechatmp.reply as reply
import channel.wechatmp.receive as receive
import json
import requests
import threading
from common.singleton import singleton
from common.log import logger
from common.expired_dict import ExpiredDict
from config import conf
from bridge.reply import *
from bridge.context import *
from plugins import *
import traceback
from channel.chat_channel import ChatChannel
from channel.wechatmp.common import *
# If using SSL, uncomment the following lines, and modify the certificate path.
# from cheroot.server import HTTPServer
@@ -22,213 +20,110 @@ import traceback
# certificate='/ssl/cert.pem',
# private_key='/ssl/cert.key')
# from concurrent.futures import ThreadPoolExecutor
# thread_pool = ThreadPoolExecutor(max_workers=8)
@singleton
class WechatMPChannel(ChatChannel):
NOT_SUPPORT_REPLYTYPE = [ReplyType.IMAGE, ReplyType.VOICE]
def __init__(self):
def __init__(self, passive_reply = True):
super().__init__()
self.cache_dict = dict()
self.query1 = dict()
self.query2 = dict()
self.query3 = dict()
self.passive_reply = passive_reply
self.running = set()
self.received_msgs = ExpiredDict(60*60*24)
if self.passive_reply:
self.NOT_SUPPORT_REPLYTYPE = [ReplyType.IMAGE, ReplyType.VOICE]
self.cache_dict = dict()
self.query1 = dict()
self.query2 = dict()
self.query3 = dict()
else:
# TODO support image
self.NOT_SUPPORT_REPLYTYPE = [ReplyType.IMAGE, ReplyType.VOICE]
self.app_id = conf().get('wechatmp_app_id')
self.app_secret = conf().get('wechatmp_app_secret')
self.access_token = None
self.access_token_expires_time = 0
self.access_token_lock = threading.Lock()
self.get_access_token()
def startup(self):
urls = (
'/wx', 'SubsribeAccountQuery',
)
if self.passive_reply:
urls = ('/wx', 'channel.wechatmp.SubscribeAccount.Query')
else:
urls = ('/wx', 'channel.wechatmp.ServiceAccount.Query')
app = web.application(urls, globals(), autoreload=False)
port = conf().get('wechatmp_port', 8080)
web.httpserver.runsimple(app.wsgifunc(), ('0.0.0.0', port))
def send(self, reply: Reply, context: Context):
reply_cnt = math.ceil(len(reply.content) / 600)
receiver = context["receiver"]
self.cache_dict[receiver] = (reply_cnt, reply.content)
logger.debug("[send] reply to {} saved to cache: {}".format(receiver, reply))
def wechatmp_request(self, method, url, **kwargs):
r = requests.request(method=method, url=url, **kwargs)
r.raise_for_status()
r.encoding = "utf-8"
ret = r.json()
if "errcode" in ret and ret["errcode"] != 0:
raise WeChatAPIException("{}".format(ret))
return ret
def get_access_token(self):
def verify_server():
try:
data = web.input()
if len(data) == 0:
return "None"
signature = data.signature
timestamp = data.timestamp
nonce = data.nonce
echostr = data.echostr
token = conf().get('wechatmp_token') #请按照公众平台官网\基本配置中信息填写
# return the access_token
if self.access_token:
if self.access_token_expires_time - time.time() > 60:
return self.access_token
data_list = [token, timestamp, nonce]
data_list.sort()
sha1 = hashlib.sha1()
# map(sha1.update, data_list) #python2
sha1.update("".join(data_list).encode('utf-8'))
hashcode = sha1.hexdigest()
print("handle/GET func: hashcode, signature: ", hashcode, signature)
if hashcode == signature:
return echostr
# Get new access_token
# Do not request access_token in parallel! Only the last obtained is valid.
if self.access_token_lock.acquire(blocking=False):
# Wait for other threads that have previously obtained access_token to complete the request
# This happens every 2 hours, so it doesn't affect the experience very much
time.sleep(1)
self.access_token = None
url="https://api.weixin.qq.com/cgi-bin/token"
params={
"grant_type": "client_credential",
"appid": self.app_id,
"secret": self.app_secret
}
data = self.wechatmp_request(method='get', url=url, params=params)
self.access_token = data['access_token']
self.access_token_expires_time = int(time.time()) + data['expires_in']
logger.info("[wechatmp] access_token: {}".format(self.access_token))
self.access_token_lock.release()
else:
return ""
except Exception as Argument:
return Argument
# Wait for token update
while self.access_token_lock.locked():
time.sleep(0.1)
return self.access_token
def send(self, reply: Reply, context: Context):
    """Deliver a reply to the user identified by context["receiver"].

    In passive-reply mode the content is only stashed in cache_dict, to be
    flushed by a later poll from the WeChat server; otherwise it is pushed
    immediately through the custom-message API.
    """
    receiver = context["receiver"]
    if self.passive_reply:
        self.cache_dict[receiver] = reply.content
        logger.info("[send] reply to {} saved to cache: {}".format(receiver, reply))
        return
    reply_text = reply.content
    url = "https://api.weixin.qq.com/cgi-bin/message/custom/send"
    params = {
        "access_token": self.get_access_token()
    }
    json_data = {
        "touser": receiver,
        "msgtype": "text",
        "text": {"content": reply_text}
    }
    # ensure_ascii=False keeps CJK text readable; encode explicitly so
    # requests does not re-encode the body.
    self.wechatmp_request(method='post', url=url, params=params, data=json.dumps(json_data, ensure_ascii=False).encode('utf8'))
    logger.info("[send] Do send to {}: {}".format(receiver, reply_text))
    return
# This class is instantiated once per query
class SubsribeAccountQuery():
    """web.py handler for the subscription-account (passive reply) endpoint.

    GET answers the WeChat server's verification handshake. POST implements
    the passive-reply protocol: WeChat re-posts the same message up to three
    times ~5s apart; query1/query2/query3 flags track which retry this is,
    and cache_dict holds (remaining_segments, text) tuples produced
    asynchronously by the channel worker.
    """
    def GET(self):
        # Signature handshake from the WeChat server.
        return verify_server()
    def POST(self):
        # WechatMPChannel is presumably a singleton; verify — a fresh
        # channel per request would lose cache_dict otherwise.
        channel_instance = WechatMPChannel()
        try:
            query_time = time.time()
            webData = web.data()
            # logger.debug("[wechatmp] Receive request:\n" + webData.decode("utf-8"))
            wechat_msg = receive.parse_xml(webData)
            if wechat_msg.msg_type == 'text':
                from_user = wechat_msg.from_user_id
                to_user = wechat_msg.to_user_id
                message = wechat_msg.content.decode("utf-8")
                message_id = wechat_msg.msg_id
                logger.info("[wechatmp] {}:{} Receive post query {} {}: {}".format(web.ctx.env.get('REMOTE_ADDR'), web.ctx.env.get('REMOTE_PORT'), from_user, message_id, message))
                cache_key = from_user
                cache = channel_instance.cache_dict.get(cache_key)
                reply_text = ""
                # New request
                if cache == None:
                    # The first query begins: compose a context and hand it
                    # to the channel worker; reset the cache.
                    context = channel_instance._compose_context(ContextType.TEXT, message, isgroup=False, msg=wechat_msg)
                    logger.debug("[wechatmp] context: {} {}".format(context, wechat_msg))
                    if context:
                        # set private openai_api_key
                        # if from_user is not changed in itchat, this can be placed at chat_channel
                        user_data = conf().get_user_data(from_user)
                        context['openai_api_key'] = user_data.get('openai_api_key') # None or user openai_api_key
                        channel_instance.cache_dict[cache_key] = (0, "")
                        channel_instance.produce(context)
                    else:
                        # Message did not trigger the bot: tell the user how
                        # to address it (runtime strings left untranslated).
                        trigger_prefix = conf().get('single_chat_prefix',[''])[0]
                        if trigger_prefix:
                            content = textwrap.dedent(f"""\
                                请输入'{trigger_prefix}'接你想说的话跟我说话。
                                例如:
                                {trigger_prefix}你好,很高兴见到你。""")
                        else:
                            logger.error(f"[wechatmp] unknown error")
                            content = textwrap.dedent("""\
                                未知错误,请稍后再试""")
                        replyMsg = reply.TextMsg(wechat_msg.from_user_id, wechat_msg.to_user_id, content)
                        return replyMsg.send()
                    channel_instance.query1[cache_key] = False
                    channel_instance.query2[cache_key] = False
                    channel_instance.query3[cache_key] = False
                # Request again
                elif cache[0] == 0 and channel_instance.query1.get(cache_key) == True and channel_instance.query2.get(cache_key) == True and channel_instance.query3.get(cache_key) == True:
                    # All three retries were consumed with no reply ready:
                    # the user re-asked, so restart the waiting cycle.
                    channel_instance.query1[cache_key] = False #To improve waiting experience, this can be set to True.
                    channel_instance.query2[cache_key] = False #To improve waiting experience, this can be set to True.
                    channel_instance.query3[cache_key] = False
                elif cache[0] >= 1:
                    # Skip the waiting phase
                    channel_instance.query1[cache_key] = True
                    channel_instance.query2[cache_key] = True
                    channel_instance.query3[cache_key] = True
                # NOTE(review): the inner def below appears spliced in from
                # the channel class by the diff view (it references self,
                # which is not bound here) and is never called from POST —
                # confirm against the original file.
                def _success_callback(self, session_id, context, **kwargs): # callback function when the thread ends (comment looks copied from the fail callback)
                    logger.debug("[wechatmp] Success to generate reply, msgId={}".format(context['msg'].msg_id))
                    if self.passive_reply:
                        self.running.remove(session_id)
                cache = channel_instance.cache_dict.get(cache_key)
                if channel_instance.query1.get(cache_key) == False:
                    # The first query from wechat official server
                    logger.debug("[wechatmp] query1 {}".format(cache_key))
                    channel_instance.query1[cache_key] = True
                    cnt = 0
                    # Poll up to 4.5s for the worker to fill the cache.
                    while cache[0] == 0 and cnt < 45:
                        cnt = cnt + 1
                        time.sleep(0.1)
                        cache = channel_instance.cache_dict.get(cache_key)
                    if cnt == 45:
                        # waiting for timeout (the POST query will be closed by wechat official server)
                        time.sleep(5)
                        # and do nothing
                        return
                    else:
                        pass
                elif channel_instance.query2.get(cache_key) == False:
                    # The second query from wechat official server
                    logger.debug("[wechatmp] query2 {}".format(cache_key))
                    channel_instance.query2[cache_key] = True
                    cnt = 0
                    while cache[0] == 0 and cnt < 45:
                        cnt = cnt + 1
                        time.sleep(0.1)
                        cache = channel_instance.cache_dict.get(cache_key)
                    if cnt == 45:
                        # waiting for timeout (the POST query will be closed by wechat official server)
                        time.sleep(5)
                        # and do nothing
                        return
                    else:
                        pass
                elif channel_instance.query3.get(cache_key) == False:
                    # The third query from wechat official server
                    logger.debug("[wechatmp] query3 {}".format(cache_key))
                    channel_instance.query3[cache_key] = True
                    cnt = 0
                    while cache[0] == 0 and cnt < 40:
                        cnt = cnt + 1
                        time.sleep(0.1)
                        cache = channel_instance.cache_dict.get(cache_key)
                    if cnt == 40:
                        # Have waiting for 3x5 seconds
                        # return timeout message (runtime string: "still
                        # thinking, reply anything to retry")
                        reply_text = "【正在思考中,回复任意文字尝试获取回复】"
                        logger.info("[wechatmp] Three queries has finished For {}: {}".format(from_user, message_id))
                        replyPost = reply.TextMsg(from_user, to_user, reply_text).send()
                        return replyPost
                    else:
                        pass
                # WeChat closes the connection after ~5s; give up past 4.8s.
                if float(time.time()) - float(query_time) > 4.8:
                    logger.info("[wechatmp] Timeout for {} {}".format(from_user, message_id))
                    return
                if cache[0] > 1:
                    # More than one 600-char segment left: send one segment
                    # and keep the remainder for the next poll.
                    reply_text = cache[1][:600] + "\n【未完待续,回复任意文字以继续】" #wechatmp auto_reply length limit
                    channel_instance.cache_dict[cache_key] = (cache[0] - 1, cache[1][600:])
                elif cache[0] == 1:
                    # Last segment: send it and clear the cache entry.
                    reply_text = cache[1]
                    channel_instance.cache_dict.pop(cache_key)
                logger.info("[wechatmp] {}:{} Do send {}".format(web.ctx.env.get('REMOTE_ADDR'), web.ctx.env.get('REMOTE_PORT'), reply_text))
                replyPost = reply.TextMsg(from_user, to_user, reply_text).send()
                return replyPost
            elif wechat_msg.msg_type == 'event':
                # Subscribe/other events get a fixed welcome message.
                logger.info("[wechatmp] Event {} from {}".format(wechat_msg.Event, wechat_msg.from_user_id))
                trigger_prefix = conf().get('single_chat_prefix',[''])[0]
                content = textwrap.dedent(f"""\
                    感谢您的关注!
                    这里是ChatGPT,可以自由对话。
                    资源有限,回复较慢,请勿着急。
                    支持通用表情输入。
                    暂时不支持图片输入。
                    支持图片输出,画字开头的问题将回复图片链接。
                    支持角色扮演和文字冒险两种定制模式对话。
                    输入'{trigger_prefix}#帮助' 查看详细指令。""")
                replyMsg = reply.TextMsg(wechat_msg.from_user_id, wechat_msg.to_user_id, content)
                return replyMsg.send()
            else:
                # Unsupported message type: acknowledge without handling.
                logger.info("暂且不处理")
            return "success"
        except Exception as exc:
            logger.exception(exc)
            return exc
def _fail_callback(self, session_id, exception, context, **kwargs):  # callback invoked when the worker thread ends abnormally
    """Log a failed reply generation; in passive-reply mode also drop the
    session from the running set (the cache must hold no entry for it)."""
    logger.exception("[wechatmp] Fail to generate reply to user, msgId={}, exception={}".format(context['msg'].msg_id, exception))
    if not self.passive_reply:
        return
    assert session_id not in self.cache_dict
    self.running.remove(session_id)
+1 -1
View File
@@ -12,7 +12,7 @@ class TmpDir(object):
def __init__(self):
pathExists = os.path.exists(self.tmpFilePath)
if not pathExists and conf().get('speech_recognition') == True:
if not pathExists:
os.makedirs(self.tmpFilePath)
def path(self):
-1
View File
@@ -2,7 +2,6 @@
"open_ai_api_key": "YOUR API KEY",
"model": "gpt-3.5-turbo",
"proxy": "",
"use_azure_chatgpt": false,
"single_chat_prefix": ["bot", "@bot"],
"single_chat_reply_prefix": "[bot] ",
"group_chat_prefix": ["@bot"],
+6 -3
View File
@@ -16,6 +16,7 @@ available_setting = {
# chatgpt模型, 当use_azure_chatgpt为true时,其名称为Azure上model deployment名称
"model": "gpt-3.5-turbo",
"use_azure_chatgpt": False, # 是否使用azure的chatgpt
"azure_deployment_id": "", #azure 模型部署名称
# Bot触发配置
"single_chat_prefix": ["bot", "@bot"], # 私聊时文本需要包含该前缀才能触发机器人回复
@@ -79,14 +80,16 @@ available_setting = {
"wechaty_puppet_service_token": "", # wechaty的token
# wechatmp的配置
"wechatmp_token": "", # 微信公众平台的Token
"wechatmp_port": 8080, # 微信公众平台的端口,需要端口转发到80或443
"wechatmp_token": "", # 微信公众平台的Token
"wechatmp_port": 8080, # 微信公众平台的端口,需要端口转发到80或443
"wechatmp_app_id": "", # 微信公众平台的appID,仅服务号需要
"wechatmp_app_secret": "", # 微信公众平台的appsecret,仅服务号需要
# chatgpt指令自定义触发词
"clear_memory_commands": ['#清除记忆'], # 重置会话指令,必须以#开头
# channel配置
"channel_type": "wx", # 通道类型,支持:{wx,wxy,terminal,wechatmp}
"channel_type": "wx", # 通道类型,支持:{wx,wxy,terminal,wechatmp,wechatmp_service}
"debug": False, # 是否开启debug模式,开启后会打印更多日志
+8 -6
View File
@@ -1,7 +1,7 @@
**Table of Content**
- [插件化初衷](#插件化初衷)
- [插件安装方法](#插件安装方法)
- [插件安装方法](#插件安装方法)
- [插件化实现](#插件化实现)
- [插件编写示例](#插件编写示例)
- [插件设计建议](#插件设计建议)
@@ -23,9 +23,9 @@
在本仓库中预置了一些插件,如果要安装其他仓库的插件,有两种方法。
- 第一种方法是在将下载的插件文件都解压到"plugins"文件夹的一个单独的文件夹,最终插件的代码都位于"plugins/PLUGIN_NAME/*"中。启动程序后,如果插件的目录结构正确,插件会自动被扫描加载。
- 第一种方法是在将下载的插件文件都解压到"plugins"文件夹的一个单独的文件夹,最终插件的代码都位于"plugins/PLUGIN_NAME/*"中。启动程序后,如果插件的目录结构正确,插件会自动被扫描加载。除此以外,注意你还需要安装文件夹中`requirements.txt`中的依赖。
- 第二种方法是`Godcmd`插件,它是预置的管理员插件,能够让程序在运行时就能安装插件。
- 第二种方法是`Godcmd`插件,它是预置的管理员插件,能够让程序在运行时就能安装插件,它能够自动安装依赖
安装插件的命令是"#installp [仓库源](https://github.com/zhayujie/chatgpt-on-wechat/blob/master/plugins/source.json)记录的插件名/仓库地址"。这是管理员命令,认证方法在[这里](https://github.com/zhayujie/chatgpt-on-wechat/tree/master/plugins/godcmd)。
@@ -52,6 +52,8 @@
以下是它们的默认处理逻辑(太长不看,可跳到[插件编写示例](#插件编写示例))
**注意以下包含的代码是`v1.1.0`中的片段,已过时,只可用于理解事件,最新的默认代码逻辑请参考[chat_channel](https://github.com/zhayujie/chatgpt-on-wechat/blob/master/channel/chat_channel.py)**
#### 1. 收到消息
负责接收用户消息,根据用户的配置,判断本条消息是否触发机器人。如果触发,则会判断该消息的类型(声音、文本、画图命令等),将消息包装成如下的`Context`交付给下一个步骤。
@@ -91,9 +93,9 @@
if context.type == ContextType.TEXT or context.type == ContextType.IMAGE_CREATE:
reply = super().build_reply_content(context.content, context) #文字跟画图交付给chatgpt
elif context.type == ContextType.VOICE: # 声音先进行语音转文字后,修改Context类型为文字后,再交付给chatgpt
msg = context['msg']
file_name = TmpDir().path() + context.content
msg.download(file_name)
cmsg = context['msg']
cmsg.prepare()
file_name = context.content
reply = super().build_voice_to_text(file_name)
if reply.type != ReplyType.ERROR and reply.type != ReplyType.INFO:
context.content = reply.content # 语音转文字后,将文字内容作为新的context
+10
View File
@@ -104,6 +104,11 @@ ADMIN_COMMANDS = {
"args": ["插件名"],
"desc": "卸载指定插件",
},
"updatep": {
"alias": ["updatep", "更新插件"],
"args": ["插件名"],
"desc": "更新指定插件",
},
"debug": {
"alias": ["debug", "调试模式", "DEBUG"],
"desc": "开启机器调试日志",
@@ -336,6 +341,11 @@ class Godcmd(Plugin):
ok, result = False, "请提供插件名"
else:
ok, result = PluginManager().uninstall_plugin(args[0])
elif cmd == "updatep":
if len(args) != 1:
ok, result = False, "请提供插件名"
else:
ok, result = PluginManager().update_plugin(args[0])
logger.debug("[Godcmd] admin command: %s by %s" % (cmd, user))
else:
ok, result = False, "需要管理员权限才能执行该指令"
+26 -2
View File
@@ -220,7 +220,7 @@ class PluginManager:
if not match:
try:
with open("./plugins/source.json","r") as f:
with open("./plugins/source.json","r", encoding="utf-8") as f:
source = json.load(f)
if repo in source["repo"]:
repo = source["repo"][repo]["url"]
@@ -238,11 +238,35 @@ class PluginManager:
if os.path.exists(os.path.join(dirname,"requirements.txt")):
logger.info("detect requirements.txtinstalling...")
pkgmgr.install_requirements(os.path.join(dirname,"requirements.txt"))
return True, "安装插件成功,请使用#scanp命令扫描插件或重启程序"
return True, "安装插件成功,请使用 #scanp 命令扫描插件或重启程序,开启前请检查插件是否需要配置"
except Exception as e:
logger.error("Failed to install plugin, {}".format(e))
return False, "安装插件失败,"+str(e)
def update_plugin(self, name:str):
try:
import common.package_manager as pkgmgr
pkgmgr.check_dulwich()
except Exception as e:
logger.error("Failed to install plugin, {}".format(e))
return False, "无法导入dulwich,更新插件失败"
from dulwich import porcelain
name = name.upper()
if name not in self.plugins:
return False, "插件不存在"
if name in ["HELLO","GODCMD","ROLE","TOOL","BDUNIT","BANWORDS","FINISH","DUNGEON"]:
return False, "预置插件无法更新,请更新主程序仓库"
dirname = self.plugins[name].path
try:
porcelain.pull(dirname, "origin")
if os.path.exists(os.path.join(dirname,"requirements.txt")):
logger.info("detect requirements.txtinstalling...")
pkgmgr.install_requirements(os.path.join(dirname,"requirements.txt"))
return True, "更新插件成功,请重新运行程序"
except Exception as e:
logger.error("Failed to update plugin, {}".format(e))
return False, "更新插件失败,"+str(e)
def uninstall_plugin(self, name:str):
name = name.upper()
if name not in self.plugins:
+4
View File
@@ -3,6 +3,10 @@
"sdwebui": {
"url": "https://github.com/lanvent/plugin_sdwebui.git",
"desc": "利用stable-diffusion画图的插件"
},
"replicate": {
"url": "https://github.com/lanvent/plugin_replicate.git",
"desc": "利用replicate api画图的插件"
}
}
}
+1 -1
View File
@@ -21,4 +21,4 @@ web.py
# chatgpt-tool-hub plugin
--extra-index-url https://pypi.python.org/simple
chatgpt_tool_hub>=0.3.5
chatgpt_tool_hub>=0.3.7
+1 -1
View File
@@ -1,4 +1,4 @@
openai>=0.27.2
openai==0.27.2
HTMLParser>=0.0.2
PyQRCode>=1.2.1
qrcode>=7.4.2