mirror of
https://github.com/zhayujie/chatgpt-on-wechat.git
synced 2026-05-09 12:51:27 +08:00
Compare commits
14 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| 88bf345b91 | |||
| ab4ff3d1a3 | |||
| 3502e0d643 | |||
| 995894d3aa | |||
| 4da8714124 | |||
| 6b247ae880 | |||
| 176941ea3b | |||
| 5176b56d3b | |||
| 8abf18ab25 | |||
| 395edbd9f4 | |||
| 2386eb8fc2 | |||
| 68208f82a0 | |||
| 24b63bc5bd | |||
| 1817a972c6 |
@@ -1,6 +1,8 @@
|
||||
.DS_Store
|
||||
.idea
|
||||
.vscode
|
||||
.venv
|
||||
.vs
|
||||
.wechaty/
|
||||
__pycache__/
|
||||
venv*
|
||||
@@ -22,6 +24,8 @@ plugins/**/
|
||||
!plugins/tool
|
||||
!plugins/banwords
|
||||
!plugins/banwords/**/
|
||||
plugins/banwords/__pycache__
|
||||
plugins/banwords/lib/__pycache__
|
||||
!plugins/hello
|
||||
!plugins/role
|
||||
!plugins/keyword
|
||||
|
||||
@@ -5,11 +5,12 @@
|
||||
最新版本支持的功能如下:
|
||||
|
||||
- [x] **多端部署:** 有多种部署方式可选择且功能完备,目前已支持个人微信,微信公众号和企业微信应用等部署方式
|
||||
- [x] **基础对话:** 私聊及群聊的消息智能回复,支持多轮会话上下文记忆,支持 GPT-3,GPT-3.5,GPT-4模型
|
||||
- [x] **基础对话:** 私聊及群聊的消息智能回复,支持多轮会话上下文记忆,支持 GPT-3, GPT-3.5, GPT-4, 文心一言模型
|
||||
- [x] **语音识别:** 可识别语音消息,通过文字或语音回复,支持 azure, baidu, google, openai等多种语音模型
|
||||
- [x] **图片生成:** 支持图片生成 和 图生图(如照片修复),可选择 DALL-E, stable diffusion, replicate模型
|
||||
- [x] **图片生成:** 支持图片生成 和 图生图(如照片修复),可选择 DALL-E, stable diffusion, replicate, midjourney模型
|
||||
- [x] **丰富插件:** 支持个性化插件扩展,已实现多角色切换、文字冒险、敏感词过滤、聊天记录总结等插件
|
||||
- [X] **Tool工具:** 与操作系统和互联网交互,支持最新信息搜索、数学计算、天气和资讯查询、网页总结,基于 [chatgpt-tool-hub](https://github.com/goldfishh/chatgpt-tool-hub) 实现
|
||||
- [x] **知识库:** 通过上传知识库文件自定义专属机器人,可作为数字分身、领域知识库、智能客服使用,基于 [LinkAI](https://chat.link-ai.tech/console) 实现
|
||||
|
||||
> 欢迎接入更多应用,参考 [Terminal代码](https://github.com/zhayujie/chatgpt-on-wechat/blob/master/channel/terminal/terminal_channel.py)实现接收和发送消息逻辑即可接入。 同时欢迎增加新的插件,参考 [插件说明文档](https://github.com/zhayujie/chatgpt-on-wechat/tree/master/plugins)。
|
||||
|
||||
@@ -27,7 +28,9 @@ Demo made by [Visionn](https://www.wangpc.cc/)
|
||||
|
||||
# 更新日志
|
||||
|
||||
>**2023.06.12:** 接入 [LinkAI](https://chat.link-ai.tech/console) 平台,可在线创建 个人知识库,并接入微信、公众号及企业微信中。使用参考 [接入文档](https://link-ai.tech/platform/link-app/wechat)。
|
||||
>**2023.08.08:** 接入百度文心一言模型,通过 [插件](https://github.com/zhayujie/chatgpt-on-wechat/tree/master/plugins/linkai) 支持 Midjourney 绘图
|
||||
|
||||
>**2023.06.12:** 接入 [LinkAI](https://chat.link-ai.tech/console) 平台,可在线创建个人知识库,并接入微信、公众号及企业微信中,打造专属客服机器人。使用参考 [接入文档](https://link-ai.tech/platform/link-app/wechat)。
|
||||
|
||||
>**2023.04.26:** 支持企业微信应用号部署,兼容插件,并支持语音图片交互,私人助理理想选择,[使用文档](https://github.com/zhayujie/chatgpt-on-wechat/blob/master/channel/wechatcom/README.md)。(contributed by [@lanvent](https://github.com/lanvent) in [#944](https://github.com/zhayujie/chatgpt-on-wechat/pull/944))
|
||||
|
||||
@@ -151,7 +154,7 @@ pip3 install azure-cognitiveservices-speech
|
||||
|
||||
**4.其他配置**
|
||||
|
||||
+ `model`: 模型名称,目前支持 `gpt-3.5-turbo`, `text-davinci-003`, `gpt-4`, `gpt-4-32k` (其中gpt-4 api暂未完全开放,申请通过后可使用)
|
||||
+ `model`: 模型名称,目前支持 `gpt-3.5-turbo`, `text-davinci-003`, `gpt-4`, `gpt-4-32k`, `wenxin` (其中gpt-4 api暂未完全开放,申请通过后可使用)
|
||||
+ `temperature`,`frequency_penalty`,`presence_penalty`: Chat API接口参数,详情参考[OpenAI官方文档。](https://platform.openai.com/docs/api-reference/chat)
|
||||
+ `proxy`:由于目前 `openai` 接口国内无法访问,需配置代理客户端的地址,详情参考 [#351](https://github.com/zhayujie/chatgpt-on-wechat/issues/351)
|
||||
+ 对于图像生成,在满足个人或群组触发条件外,还需要额外的关键词前缀来触发,对应配置 `image_create_prefix `
|
||||
|
||||
@@ -0,0 +1,104 @@
|
||||
# encoding:utf-8
|
||||
|
||||
import requests, json
|
||||
from bot.bot import Bot
|
||||
from bridge.reply import Reply, ReplyType
|
||||
from bot.session_manager import SessionManager
|
||||
from bridge.context import ContextType
|
||||
from bridge.reply import Reply, ReplyType
|
||||
from common.log import logger
|
||||
from config import conf
|
||||
from bot.baidu.baidu_wenxin_session import BaiduWenxinSession
|
||||
|
||||
BAIDU_API_KEY = conf().get("baidu_wenxin_api_key")
BAIDU_SECRET_KEY = conf().get("baidu_wenxin_secret_key")


class BaiduWenxinBot(Bot):
    """Bot backed by the Baidu Wenxin (ERNIE) chat-completion HTTP API."""

    def __init__(self):
        super().__init__()
        # "eb-instant" is the ERNIE-Bot-turbo endpoint, used as the default model.
        self.sessions = SessionManager(BaiduWenxinSession, model=conf().get("baidu_wenxin_model") or "eb-instant")

    def reply(self, query, context=None):
        """Route an incoming query to the right handler and build a Reply.

        :param query: user message text (or image prompt for IMAGE_CREATE)
        :param context: message context carrying `type` and `session_id`
        :return: Reply object, or None when the context type is unsupported
        """
        # acquire reply content
        if context and context.type:
            if context.type == ContextType.TEXT:
                logger.info("[BAIDU] query={}".format(query))
                session_id = context["session_id"]
                reply = None
                if query == "#清除记忆":
                    self.sessions.clear_session(session_id)
                    reply = Reply(ReplyType.INFO, "记忆已清除")
                elif query == "#清除所有":
                    self.sessions.clear_all_session()
                    reply = Reply(ReplyType.INFO, "所有人记忆已清除")
                else:
                    session = self.sessions.session_query(query, session_id)
                    result = self.reply_text(session)
                    total_tokens, completion_tokens, reply_content = (
                        result["total_tokens"],
                        result["completion_tokens"],
                        result["content"],
                    )
                    logger.debug(
                        "[BAIDU] new_query={}, session_id={}, reply_cont={}, completion_tokens={}".format(session.messages, session_id, reply_content, completion_tokens)
                    )

                    if total_tokens == 0:
                        # total_tokens == 0 is the error marker set by reply_text
                        reply = Reply(ReplyType.ERROR, reply_content)
                    else:
                        self.sessions.session_reply(reply_content, session_id, total_tokens)
                        reply = Reply(ReplyType.TEXT, reply_content)
                return reply
            elif context.type == ContextType.IMAGE_CREATE:
                # create_img is assumed to be provided by the Bot hierarchy — TODO confirm
                ok, retstring = self.create_img(query, 0)
                reply = None
                if ok:
                    reply = Reply(ReplyType.IMAGE_URL, retstring)
                else:
                    reply = Reply(ReplyType.ERROR, retstring)
                return reply

    def reply_text(self, session: BaiduWenxinSession, retry_count=0):
        """Call the Wenxin chat endpoint with the session's message history.

        :param session: session whose `messages` list is sent as the payload
        :param retry_count: reserved for retry logic (currently unused)
        :return: dict with keys "total_tokens", "completion_tokens" and
                 "content"; total_tokens == 0 signals an error result
        """
        try:
            logger.info("[BAIDU] model={}".format(session.model))
            access_token = self.get_access_token()
            # get_access_token() stringifies its result, so a missing token
            # comes back as the literal string 'None'.
            if access_token == 'None':
                logger.warn("[BAIDU] access token 获取失败")
                return {
                    "total_tokens": 0,
                    "completion_tokens": 0,
                    # was the int 0, which produced an unreadable ERROR reply;
                    # return a human-readable message instead
                    "content": "access token 获取失败,请检查 baidu_wenxin_api_key 和 baidu_wenxin_secret_key 配置",
                }
            url = "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/" + session.model + "?access_token=" + access_token
            headers = {
                'Content-Type': 'application/json'
            }
            payload = {'messages': session.messages}
            response = requests.request("POST", url, headers=headers, data=json.dumps(payload))
            response_text = json.loads(response.text)
            res_content = response_text["result"]
            total_tokens = response_text["usage"]["total_tokens"]
            completion_tokens = response_text["usage"]["completion_tokens"]
            logger.info("[BAIDU] reply={}".format(res_content))
            return {
                "total_tokens": total_tokens,
                "completion_tokens": completion_tokens,
                "content": res_content,
            }
        except Exception as e:
            logger.warn("[BAIDU] Exception: {}".format(e))
            self.sessions.clear_session(session.session_id)
            # bug fix: the error dict previously omitted "total_tokens",
            # causing a KeyError when reply() unpacked the result
            return {"total_tokens": 0, "completion_tokens": 0, "content": "出错了: {}".format(e)}

    def get_access_token(self):
        """Exchange the configured API key / secret key for an access token.

        :return: the access token as a string, or the literal string 'None'
                 when the OAuth response carries no token
        """
        url = "https://aip.baidubce.com/oauth/2.0/token"
        params = {"grant_type": "client_credentials", "client_id": BAIDU_API_KEY, "client_secret": BAIDU_SECRET_KEY}
        return str(requests.post(url, params=params).json().get("access_token"))
|
||||
@@ -0,0 +1,87 @@
|
||||
from bot.session_manager import Session
|
||||
from common.log import logger
|
||||
|
||||
"""
|
||||
e.g. [
|
||||
{"role": "user", "content": "Who won the world series in 2020?"},
|
||||
{"role": "assistant", "content": "The Los Angeles Dodgers won the World Series in 2020."},
|
||||
{"role": "user", "content": "Where was it played?"}
|
||||
]
|
||||
"""
|
||||
|
||||
class BaiduWenxinSession(Session):
    """Chat session for the Baidu Wenxin API.

    Holds the running message list inherited from Session and knows how to
    drop the oldest turns once the token budget is exceeded.
    """

    def __init__(self, session_id, system_prompt=None, model="gpt-3.5-turbo"):
        super().__init__(session_id, system_prompt)
        self.model = model
        # Baidu Wenxin has no system-prompt concept, so reset() is not called here.
        # self.reset()

    def discard_exceeding(self, max_tokens, cur_tokens=None):
        """Trim the oldest turns until the session fits within max_tokens.

        :param max_tokens: token budget for the whole message list
        :param cur_tokens: fallback count used when precise counting fails
        :return: the (possibly estimated) token count after trimming
        """
        is_precise = True
        try:
            cur_tokens = self.calc_tokens()
        except Exception as err:
            # Fall back to the caller-provided estimate when counting fails.
            is_precise = False
            if cur_tokens is None:
                raise err
            logger.debug("Exception when counting tokens precisely for query: {}".format(err))
        while cur_tokens > max_tokens:
            if len(self.messages) > 2:
                # Drop the oldest non-leading message and re-count below.
                self.messages.pop(1)
            elif len(self.messages) == 2 and self.messages[1]["role"] == "assistant":
                self.messages.pop(1)
                if is_precise:
                    cur_tokens = self.calc_tokens()
                else:
                    cur_tokens = cur_tokens - max_tokens
                break
            elif len(self.messages) == 2 and self.messages[1]["role"] == "user":
                logger.warn("user message exceed max_tokens. total_tokens={}".format(cur_tokens))
                break
            else:
                logger.debug("max_tokens={}, total_tokens={}, len(messages)={}".format(max_tokens, cur_tokens, len(self.messages)))
                break
            if is_precise:
                cur_tokens = self.calc_tokens()
            else:
                # Imprecise mode: decrement by the budget as a rough estimate.
                cur_tokens = cur_tokens - max_tokens
        return cur_tokens

    def calc_tokens(self):
        """Count the tokens currently held by this session's messages."""
        return num_tokens_from_messages(self.messages, self.model)
|
||||
|
||||
|
||||
# refer to https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
def num_tokens_from_messages(messages, model):
    """Returns the number of tokens used by a list of messages."""
    import tiktoken

    # Model aliases share the accounting of their canonical names.
    if model in ["gpt-3.5-turbo-0301", "gpt-35-turbo"]:
        return num_tokens_from_messages(messages, model="gpt-3.5-turbo")
    elif model in ["gpt-4-0314", "gpt-4-0613", "gpt-4-32k", "gpt-4-32k-0613", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k", "gpt-3.5-turbo-16k-0613", "gpt-35-turbo-16k"]:
        return num_tokens_from_messages(messages, model="gpt-4")

    try:
        encoding = tiktoken.encoding_for_model(model)
    except KeyError:
        logger.debug("Warning: model not found. Using cl100k_base encoding.")
        encoding = tiktoken.get_encoding("cl100k_base")

    # Per-message / per-name overhead differs between model families.
    if model == "gpt-3.5-turbo":
        per_message, per_name = 4, -1  # <|start|>{role/name}\n{content}<|end|>\n; role omitted when a name exists
    elif model == "gpt-4":
        per_message, per_name = 3, 1
    else:
        logger.warn(f"num_tokens_from_messages() is not implemented for model {model}. Returning num tokens assuming gpt-3.5-turbo.")
        return num_tokens_from_messages(messages, model="gpt-3.5-turbo")

    token_count = 0
    for message in messages:
        token_count += per_message
        for key, value in message.items():
            token_count += len(encoding.encode(value))
            if key == "name":
                token_count += per_name
    token_count += 3  # every reply is primed with <|start|>assistant<|message|>
    return token_count
|
||||
+6
-3
@@ -11,10 +11,13 @@ def create_bot(bot_type):
|
||||
:return: bot instance
|
||||
"""
|
||||
if bot_type == const.BAIDU:
|
||||
# Baidu Unit对话接口
|
||||
from bot.baidu.baidu_unit_bot import BaiduUnitBot
|
||||
# 替换Baidu Unit为Baidu文心千帆对话接口
|
||||
# from bot.baidu.baidu_unit_bot import BaiduUnitBot
|
||||
# return BaiduUnitBot()
|
||||
|
||||
return BaiduUnitBot()
|
||||
from bot.baidu.baidu_wenxin import BaiduWenxinBot
|
||||
|
||||
return BaiduWenxinBot()
|
||||
|
||||
elif bot_type == const.CHATGPT:
|
||||
# ChatGPT 网页端web接口
|
||||
|
||||
@@ -64,15 +64,16 @@ class LinkAIBot(Bot, OpenAIImage):
|
||||
session_id = context["session_id"]
|
||||
|
||||
session = self.sessions.session_query(query, session_id)
|
||||
|
||||
model = conf().get("model") or "gpt-3.5-turbo"
|
||||
# remove system message
|
||||
if app_code and session.messages[0].get("role") == "system":
|
||||
session.messages.pop(0)
|
||||
if session.messages[0].get("role") == "system":
|
||||
if app_code or model == "wenxin":
|
||||
session.messages.pop(0)
|
||||
|
||||
body = {
|
||||
"app_code": app_code,
|
||||
"messages": session.messages,
|
||||
"model": conf().get("model") or "gpt-3.5-turbo", # 对话模型的名称
|
||||
"model": model, # 对话模型的名称, 支持 gpt-3.5-turbo, gpt-3.5-turbo-16k, gpt-4, wenxin
|
||||
"temperature": conf().get("temperature"),
|
||||
"top_p": conf().get("top_p", 1),
|
||||
"frequency_penalty": conf().get("frequency_penalty", 0.0), # [-2,2]之间,该值越大则更倾向于产生不同的内容
|
||||
|
||||
@@ -23,6 +23,8 @@ class Bridge(object):
|
||||
self.btype["chat"] = const.OPEN_AI
|
||||
if conf().get("use_azure_chatgpt", False):
|
||||
self.btype["chat"] = const.CHATGPTONAZURE
|
||||
if model_type in ["wenxin"]:
|
||||
self.btype["chat"] = const.BAIDU
|
||||
if conf().get("use_linkai") and conf().get("linkai_api_key"):
|
||||
self.btype["chat"] = const.LINKAI
|
||||
self.bots = {}
|
||||
@@ -54,3 +56,9 @@ class Bridge(object):
|
||||
|
||||
def fetch_translate(self, text, from_lang="", to_lang="en") -> Reply:
|
||||
return self.get_bot("translate").translate(text, from_lang, to_lang)
|
||||
|
||||
def reset_bot(self):
|
||||
"""
|
||||
重置bot路由
|
||||
"""
|
||||
self.__init__()
|
||||
|
||||
@@ -108,8 +108,12 @@ class ChatChannel(Channel):
|
||||
if not conf().get("group_at_off", False):
|
||||
flag = True
|
||||
pattern = f"@{re.escape(self.name)}(\u2005|\u0020)"
|
||||
content = re.sub(pattern, r"", content)
|
||||
|
||||
subtract_res = re.sub(pattern, r"", content)
|
||||
if subtract_res == content and context["msg"].self_display_name:
|
||||
# 前缀移除后没有变化,使用群昵称再次移除
|
||||
pattern = f"@{re.escape(context['msg'].self_display_name)}(\u2005|\u0020)"
|
||||
subtract_res = re.sub(pattern, r"", content)
|
||||
content = subtract_res
|
||||
if not flag:
|
||||
if context["origin_ctype"] == ContextType.VOICE:
|
||||
logger.info("[WX]receive group voice, but checkprefix didn't match")
|
||||
|
||||
@@ -24,9 +24,7 @@ is_at: 是否被at
|
||||
- (群消息时,一般会存在实际发送者,是群内某个成员的id和昵称,下列项仅在群消息时存在)
|
||||
actual_user_id: 实际发送者id (群聊必填)
|
||||
actual_user_nickname:实际发送者昵称
|
||||
|
||||
|
||||
|
||||
self_display_name: 自身的展示名,设置群昵称时,该字段表示群昵称
|
||||
|
||||
_prepare_fn: 准备函数,用于准备消息的内容,比如下载图片等,
|
||||
_prepared: 是否已经调用过准备函数
|
||||
@@ -49,6 +47,7 @@ class ChatMessage(object):
|
||||
other_user_id = None
|
||||
other_user_nickname = None
|
||||
my_msg = False
|
||||
self_display_name = None
|
||||
|
||||
is_group = False
|
||||
is_at = False
|
||||
|
||||
@@ -58,7 +58,7 @@ def _check(func):
|
||||
if conf().get("hot_reload") == True and int(create_time) < int(time.time()) - 60: # 跳过1分钟前的历史消息
|
||||
logger.debug("[WX]history message {} skipped".format(msgId))
|
||||
return
|
||||
if cmsg.my_msg:
|
||||
if cmsg.my_msg and not cmsg.is_group:
|
||||
logger.debug("[WX]my message {} skipped".format(msgId))
|
||||
return
|
||||
return func(self, cmsg)
|
||||
|
||||
@@ -57,7 +57,8 @@ class WechatMessage(ChatMessage):
|
||||
self.from_user_nickname = nickname
|
||||
if self.to_user_id == user_id:
|
||||
self.to_user_nickname = nickname
|
||||
try: # 陌生人时候, 'User'字段可能不存在
|
||||
try: # 陌生人时候, User字段可能不存在
|
||||
# my_msg 为True是表示是自己发送的消息
|
||||
self.my_msg = itchat_msg["ToUserName"] == itchat_msg["User"]["UserName"] and \
|
||||
itchat_msg["ToUserName"] != itchat_msg["FromUserName"]
|
||||
self.other_user_id = itchat_msg["User"]["UserName"]
|
||||
@@ -66,6 +67,9 @@ class WechatMessage(ChatMessage):
|
||||
self.from_user_nickname = self.other_user_nickname
|
||||
if self.other_user_id == self.to_user_id:
|
||||
self.to_user_nickname = self.other_user_nickname
|
||||
if itchat_msg["User"].get("Self"):
|
||||
# 自身的展示名,当设置了群昵称时,该字段表示群昵称
|
||||
self.self_display_name = itchat_msg["User"].get("Self").get("DisplayName")
|
||||
except KeyError as e: # 处理偶尔没有对方信息的情况
|
||||
logger.warn("[WX]get other_user_id failed: " + str(e))
|
||||
if self.from_user_id == user_id:
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
{
|
||||
"open_ai_api_key": "YOUR API KEY",
|
||||
"model": "gpt-3.5-turbo",
|
||||
"channel_type": "wx",
|
||||
"proxy": "",
|
||||
"hot_reload": false,
|
||||
"single_chat_prefix": [
|
||||
|
||||
@@ -16,7 +16,7 @@ available_setting = {
|
||||
"open_ai_api_base": "https://api.openai.com/v1",
|
||||
"proxy": "", # openai使用的代理
|
||||
# chatgpt模型, 当use_azure_chatgpt为true时,其名称为Azure上model deployment名称
|
||||
"model": "gpt-3.5-turbo",
|
||||
"model": "gpt-3.5-turbo", # 还支持 gpt-3.5-turbo-16k, gpt-4, wenxin
|
||||
"use_azure_chatgpt": False, # 是否使用azure的chatgpt
|
||||
"azure_deployment_id": "", # azure 模型部署名称
|
||||
"azure_api_version": "", # azure api版本
|
||||
@@ -51,6 +51,10 @@ available_setting = {
|
||||
"presence_penalty": 0,
|
||||
"request_timeout": 60, # chatgpt请求超时时间,openai接口默认设置为600,对于难问题一般需要较长时间
|
||||
"timeout": 120, # chatgpt重试超时时间,在这个时间内,将会自动重试
|
||||
# Baidu 文心一言参数
|
||||
"baidu_wenxin_model": "eb-instant", # 默认使用ERNIE-Bot-turbo模型
|
||||
"baidu_wenxin_api_key": "", # Baidu api key
|
||||
"baidu_wenxin_secret_key": "", # Baidu secret key
|
||||
# 语音设置
|
||||
"speech_recognition": False, # 是否开启语音识别
|
||||
"group_speech_recognition": False, # 是否开启群组语音识别
|
||||
|
||||
@@ -1,10 +1,12 @@
|
||||
## 插件说明
|
||||
|
||||
基于 LinkAI 提供的知识库、Midjourney绘画等能力对机器人的功能进行增强。地址: https://chat.link-ai.tech/console
|
||||
基于 LinkAI 提供的知识库、Midjourney绘画等能力对机器人的功能进行增强。平台地址: https://chat.link-ai.tech/console
|
||||
|
||||
## 插件配置
|
||||
|
||||
将 `plugins/linkai` 下的 `config.json.template` 复制为 `config.json`。如果是`docker`部署,可通过映射 plugins/config.json 来完成配置。以下是配置项说明:
|
||||
将 `plugins/linkai` 目录下的 `config.json.template` 配置模板复制为最终生效的 `config.json`:
|
||||
|
||||
以下是配置项说明:
|
||||
|
||||
```bash
|
||||
{
|
||||
@@ -23,11 +25,15 @@
|
||||
}
|
||||
|
||||
```
|
||||
注意:实际 `config.json` 配置中应保证json格式,不应携带 '#' 及后面的注释。
|
||||
注意:
|
||||
|
||||
- 配置项中 `group_app_map` 部分是用于映射群聊与LinkAI平台上的应用, `midjourney` 部分是 mj 画图的配置,可根据需要进行填写,未填写配置时默认不开启相应功能
|
||||
- 实际 `config.json` 配置中应保证json格式,不应携带 '#' 及后面的注释
|
||||
- 如果是`docker`部署,可通过映射 `plugins/config.json` 到容器中来完成插件配置,参考[文档](https://github.com/zhayujie/chatgpt-on-wechat#3-%E6%8F%92%E4%BB%B6%E4%BD%BF%E7%94%A8)
|
||||
|
||||
## 插件使用
|
||||
|
||||
> 使用插件中的知识库管理功能需要首先开启`linkai`对话,依赖于全局 `config.json` 中的 `use_linkai` 和 `linkai_api_key` 配置;midjourney绘画功能则只需填写 `linkai_api_key` 配置。
|
||||
> 使用插件中的知识库管理功能需要首先开启`linkai`对话,依赖全局 `config.json` 中的 `use_linkai` 和 `linkai_api_key` 配置;而midjourney绘画功能则只需填写 `linkai_api_key` 配置,`use_linkai` 无论是否关闭均可使用。具体可参考 [详细文档](https://link-ai.tech/platform/link-app/wechat)。
|
||||
|
||||
完成配置后运行项目,会自动运行插件,输入 `#help linkai` 可查看插件功能。
|
||||
|
||||
@@ -41,6 +47,8 @@
|
||||
|
||||
例如输入 `$linkai app Kv2fXJcH`,即将当前群聊与 app_code为 Kv2fXJcH 的应用绑定。
|
||||
|
||||
另外,还可以通过 `$linkai close` 来一键关闭linkai对话,此时就会使用默认的openai接口;同理,发送 `$linkai open` 可以再次开启。
|
||||
|
||||
### 2.Midjourney绘画功能
|
||||
|
||||
指令格式:
|
||||
@@ -48,6 +56,8 @@
|
||||
```
|
||||
- 图片生成: $mj 描述词1, 描述词2..
|
||||
- 图片放大: $mju 图片ID 图片序号
|
||||
- 图片变换: $mjv 图片ID 图片序号
|
||||
- 重置: $mjr 图片ID
|
||||
```
|
||||
|
||||
例如:
|
||||
@@ -55,4 +65,11 @@
|
||||
```
|
||||
"$mj a little cat, white --ar 9:16"
|
||||
"$mju 1105592717188272288 2"
|
||||
"$mjv 11055927171882 2"
|
||||
"$mjr 11055927171882"
|
||||
```
|
||||
|
||||
注:
|
||||
1. 开启 `use_image_create_prefix` 配置后可直接复用全局画图触发词,以"画"开头便可以生成图片。
|
||||
2. 提示词内容中包含敏感词或者参数格式错误可能导致绘画失败,生成失败不消耗积分
|
||||
3. 使用 `$mj open` 和 `$mj close` 指令可以快速打开和关闭绘图功能
|
||||
|
||||
+43
-16
@@ -1,19 +1,10 @@
|
||||
import asyncio
|
||||
import json
|
||||
import threading
|
||||
from concurrent.futures import ThreadPoolExecutor
|
||||
|
||||
import plugins
|
||||
from bridge.context import ContextType
|
||||
from bridge.reply import Reply, ReplyType
|
||||
from channel.chat_message import ChatMessage
|
||||
from common.log import logger
|
||||
from config import conf, global_config
|
||||
from config import global_config
|
||||
from plugins import *
|
||||
from .midjourney import MJBot, TaskType
|
||||
|
||||
# 任务线程池
|
||||
task_thread_pool = ThreadPoolExecutor(max_workers=4)
|
||||
from .midjourney import MJBot
|
||||
from bridge import bridge
|
||||
|
||||
|
||||
@plugins.register(
|
||||
@@ -66,11 +57,28 @@ class LinkAI(Plugin):
|
||||
if len(cmd) == 1 or (len(cmd) == 2 and cmd[1] == "help"):
|
||||
_set_reply_text(self.get_help_text(verbose=True), e_context, level=ReplyType.INFO)
|
||||
return
|
||||
|
||||
if len(cmd) == 2 and (cmd[1] == "open" or cmd[1] == "close"):
|
||||
# 知识库开关指令
|
||||
if not _is_admin(e_context):
|
||||
_set_reply_text("需要管理员权限执行", e_context, level=ReplyType.ERROR)
|
||||
return
|
||||
is_open = True
|
||||
tips_text = "开启"
|
||||
if cmd[1] == "close":
|
||||
tips_text = "关闭"
|
||||
is_open = False
|
||||
conf()["use_linkai"] = is_open
|
||||
bridge.Bridge().reset_bot()
|
||||
_set_reply_text(f"知识库功能已{tips_text}", e_context, level=ReplyType.INFO)
|
||||
return
|
||||
|
||||
if len(cmd) == 3 and cmd[1] == "app":
|
||||
# 知识库应用切换指令
|
||||
if not context.kwargs.get("isgroup"):
|
||||
_set_reply_text("该指令需在群聊中使用", e_context, level=ReplyType.ERROR)
|
||||
return
|
||||
if context.kwargs.get("msg").actual_user_id not in global_config["admin_users"]:
|
||||
if not _is_admin(e_context):
|
||||
_set_reply_text("需要管理员权限执行", e_context, level=ReplyType.ERROR)
|
||||
return
|
||||
app_code = cmd[2]
|
||||
@@ -84,7 +92,8 @@ class LinkAI(Plugin):
|
||||
super().save_config(self.config)
|
||||
_set_reply_text(f"应用设置成功: {app_code}", e_context, level=ReplyType.INFO)
|
||||
else:
|
||||
_set_reply_text(f"指令错误,请输入{_get_trigger_prefix()}linkai help 获取帮助", e_context, level=ReplyType.INFO)
|
||||
_set_reply_text(f"指令错误,请输入{_get_trigger_prefix()}linkai help 获取帮助", e_context,
|
||||
level=ReplyType.INFO)
|
||||
return
|
||||
|
||||
# LinkAI 对话任务处理
|
||||
@@ -121,12 +130,30 @@ class LinkAI(Plugin):
|
||||
help_text = "用于集成 LinkAI 提供的知识库、Midjourney绘画等能力。\n\n"
|
||||
if not verbose:
|
||||
return help_text
|
||||
help_text += f'📖 知识库\n - 群聊中指定应用: {trigger_prefix}linkai app 应用编码\n\n例如: \n"$linkai app Kv2fXJcH"\n\n'
|
||||
help_text += f"🎨 绘画\n - 生成: {trigger_prefix}mj 描述词1, 描述词2.. \n - 放大: {trigger_prefix}mju 图片ID 图片序号\n\n例如:\n\"{trigger_prefix}mj a little cat, white --ar 9:16\"\n\"{trigger_prefix}mju 1105592717188272288 2\""
|
||||
help_text += f'📖 知识库\n - 群聊中指定应用: {trigger_prefix}linkai app 应用编码\n'
|
||||
help_text += f' - {trigger_prefix}linkai open: 开启对话\n'
|
||||
help_text += f' - {trigger_prefix}linkai close: 关闭对话\n'
|
||||
help_text += f'\n例如: \n"{trigger_prefix}linkai app Kv2fXJcH"\n\n'
|
||||
help_text += f"🎨 绘画\n - 生成: {trigger_prefix}mj 描述词1, 描述词2.. \n - 放大: {trigger_prefix}mju 图片ID 图片序号\n - 变换: {trigger_prefix}mjv 图片ID 图片序号\n - 重置: {trigger_prefix}mjr 图片ID"
|
||||
help_text += f"\n\n例如:\n\"{trigger_prefix}mj a little cat, white --ar 9:16\"\n\"{trigger_prefix}mju 11055927171882 2\""
|
||||
help_text += f"\n\"{trigger_prefix}mjv 11055927171882 2\"\n\"{trigger_prefix}mjr 11055927171882\""
|
||||
return help_text
|
||||
|
||||
|
||||
# 静态方法
|
||||
def _is_admin(e_context: EventContext) -> bool:
|
||||
"""
|
||||
判断消息是否由管理员用户发送
|
||||
:param e_context: 消息上下文
|
||||
:return: True: 是, False: 否
|
||||
"""
|
||||
context = e_context["context"]
|
||||
if context["isgroup"]:
|
||||
return context.kwargs.get("msg").actual_user_id in global_config["admin_users"]
|
||||
else:
|
||||
return context["receiver"] in global_config["admin_users"]
|
||||
|
||||
|
||||
def _set_reply_text(content: str, e_context: EventContext, level: ReplyType = ReplyType.ERROR):
|
||||
reply = Reply(level, content)
|
||||
e_context["reply"] = reply
|
||||
|
||||
@@ -11,6 +11,9 @@ from bridge.context import ContextType
|
||||
from plugins import EventContext, EventAction
|
||||
|
||||
INVALID_REQUEST = 410
|
||||
NOT_FOUND_ORIGIN_IMAGE = 461
|
||||
NOT_FOUND_TASK = 462
|
||||
|
||||
|
||||
class TaskType(Enum):
|
||||
GENERATE = "generate"
|
||||
@@ -18,6 +21,9 @@ class TaskType(Enum):
|
||||
VARIATION = "variation"
|
||||
RESET = "reset"
|
||||
|
||||
def __str__(self):
|
||||
return self.name
|
||||
|
||||
|
||||
class Status(Enum):
|
||||
PENDING = "pending"
|
||||
@@ -34,6 +40,14 @@ class TaskMode(Enum):
|
||||
RELAX = "relax"
|
||||
|
||||
|
||||
task_name_mapping = {
|
||||
TaskType.GENERATE.name: "生成",
|
||||
TaskType.UPSCALE.name: "放大",
|
||||
TaskType.VARIATION.name: "变换",
|
||||
TaskType.RESET.name: "重新生成",
|
||||
}
|
||||
|
||||
|
||||
class MJTask:
|
||||
def __init__(self, id, user_id: str, task_type: TaskType, raw_prompt=None, expires: int = 60 * 30,
|
||||
status=Status.PENDING):
|
||||
@@ -69,7 +83,7 @@ class MJBot:
|
||||
:param e_context: 上下文
|
||||
:return: 任务类型枚举
|
||||
"""
|
||||
if not self.config or not self.config.get("enabled"):
|
||||
if not self.config:
|
||||
return None
|
||||
trigger_prefix = conf().get("plugin_trigger_prefix", "$")
|
||||
context = e_context['context']
|
||||
@@ -79,6 +93,10 @@ class MJBot:
|
||||
return TaskType.GENERATE
|
||||
elif cmd_list[0].lower() == f"{trigger_prefix}mju":
|
||||
return TaskType.UPSCALE
|
||||
elif cmd_list[0].lower() == f"{trigger_prefix}mjv":
|
||||
return TaskType.VARIATION
|
||||
elif cmd_list[0].lower() == f"{trigger_prefix}mjr":
|
||||
return TaskType.RESET
|
||||
elif context.type == ContextType.IMAGE_CREATE and self.config.get("use_image_create_prefix"):
|
||||
return TaskType.GENERATE
|
||||
|
||||
@@ -92,9 +110,26 @@ class MJBot:
|
||||
session_id = context["session_id"]
|
||||
cmd = context.content.split(maxsplit=1)
|
||||
if len(cmd) == 1 and context.type == ContextType.TEXT:
|
||||
# midjourney 帮助指令
|
||||
self._set_reply_text(self.get_help_text(verbose=True), e_context, level=ReplyType.INFO)
|
||||
return
|
||||
|
||||
if len(cmd) == 2 and (cmd[1] == "open" or cmd[1] == "close"):
|
||||
# midjourney 开关指令
|
||||
is_open = True
|
||||
tips_text = "开启"
|
||||
if cmd[1] == "close":
|
||||
tips_text = "关闭"
|
||||
is_open = False
|
||||
self.config["enabled"] = is_open
|
||||
self._set_reply_text(f"Midjourney绘画已{tips_text}", e_context, level=ReplyType.INFO)
|
||||
return
|
||||
|
||||
if not self.config.get("enabled"):
|
||||
logger.warn("Midjourney绘画未开启,请查看 plugins/linkai/config.json 中的配置")
|
||||
self._set_reply_text(f"Midjourney绘画未开启", e_context, level=ReplyType.INFO)
|
||||
return
|
||||
|
||||
if not self._check_rate_limit(session_id, e_context):
|
||||
logger.warn("[MJ] midjourney task exceed rate limit")
|
||||
return
|
||||
@@ -110,8 +145,8 @@ class MJBot:
|
||||
e_context.action = EventAction.BREAK_PASS
|
||||
return
|
||||
|
||||
elif mj_type == TaskType.UPSCALE:
|
||||
# 图片放大
|
||||
elif mj_type == TaskType.UPSCALE or mj_type == TaskType.VARIATION:
|
||||
# 图片放大/变换
|
||||
clist = cmd[1].split()
|
||||
if len(clist) < 2:
|
||||
self._set_reply_text(f"{cmd[0]} 命令缺少参数", e_context)
|
||||
@@ -121,16 +156,27 @@ class MJBot:
|
||||
if index < 1 or index > 4:
|
||||
self._set_reply_text(f"图片序号 {index} 错误,应在 1 至 4 之间", e_context)
|
||||
return
|
||||
key = f"{TaskType.UPSCALE.name}_{img_id}_{index}"
|
||||
key = f"{str(mj_type)}_{img_id}_{index}"
|
||||
if self.temp_dict.get(key):
|
||||
self._set_reply_text(f"第 {index} 张图片已经放大过了", e_context)
|
||||
self._set_reply_text(f"第 {index} 张图片已经{task_name_mapping.get(str(mj_type))}过了", e_context)
|
||||
return
|
||||
# 图片放大操作
|
||||
reply = self.upscale(session_id, img_id, index, e_context)
|
||||
# 执行图片放大/变换操作
|
||||
reply = self.do_operate(mj_type, session_id, img_id, e_context, index)
|
||||
e_context['reply'] = reply
|
||||
e_context.action = EventAction.BREAK_PASS
|
||||
return
|
||||
|
||||
elif mj_type == TaskType.RESET:
|
||||
# 图片重新生成
|
||||
clist = cmd[1].split()
|
||||
if len(clist) < 1:
|
||||
self._set_reply_text(f"{cmd[0]} 命令缺少参数", e_context)
|
||||
return
|
||||
img_id = clist[0]
|
||||
# 图片重新生成
|
||||
reply = self.do_operate(mj_type, session_id, img_id, e_context)
|
||||
e_context['reply'] = reply
|
||||
e_context.action = EventAction.BREAK_PASS
|
||||
else:
|
||||
self._set_reply_text(f"暂不支持该命令", e_context)
|
||||
|
||||
@@ -180,9 +226,12 @@ class MJBot:
|
||||
reply = Reply(ReplyType.ERROR, "图片生成失败,请稍后再试")
|
||||
return reply
|
||||
|
||||
def upscale(self, user_id: str, img_id: str, index: int, e_context: EventContext) -> Reply:
|
||||
logger.info(f"[MJ] image upscale, img_id={img_id}, index={index}")
|
||||
body = {"type": TaskType.UPSCALE.name, "img_id": img_id, "index": index}
|
||||
def do_operate(self, task_type: TaskType, user_id: str, img_id: str, e_context: EventContext,
|
||||
index: int = None) -> Reply:
|
||||
logger.info(f"[MJ] image operate, task_type={task_type}, img_id={img_id}, index={index}")
|
||||
body = {"type": task_type.name, "img_id": img_id}
|
||||
if index:
|
||||
body["index"] = index
|
||||
if not self.config.get("img_proxy"):
|
||||
body["img_proxy"] = False
|
||||
res = requests.post(url=self.base_url + "/operate", json=body, headers=self.headers, timeout=(5, 40))
|
||||
@@ -191,23 +240,24 @@ class MJBot:
|
||||
res = res.json()
|
||||
if res.get("code") == 200:
|
||||
task_id = res.get("data").get("task_id")
|
||||
logger.info(f"[MJ] image upscale processing, task_id={task_id}")
|
||||
content = f"🔎图片正在放大中,请耐心等待"
|
||||
logger.info(f"[MJ] image operate processing, task_id={task_id}")
|
||||
icon_map = {TaskType.UPSCALE: "🔎", TaskType.VARIATION: "🪄", TaskType.RESET: "🔄"}
|
||||
content = f"{icon_map.get(task_type)}图片正在{task_name_mapping.get(task_type.name)}中,请耐心等待"
|
||||
reply = Reply(ReplyType.INFO, content)
|
||||
task = MJTask(id=task_id, status=Status.PENDING, user_id=user_id, task_type=TaskType.UPSCALE)
|
||||
task = MJTask(id=task_id, status=Status.PENDING, user_id=user_id, task_type=task_type)
|
||||
# put to memory dict
|
||||
self.tasks[task.id] = task
|
||||
key = f"{TaskType.UPSCALE.name}_{img_id}_{index}"
|
||||
key = f"{task_type.name}_{img_id}_{index}"
|
||||
self.temp_dict[key] = True
|
||||
# asyncio.run_coroutine_threadsafe(self.check_task(task, e_context), self.event_loop)
|
||||
self._do_check_task(task, e_context)
|
||||
return reply
|
||||
else:
|
||||
error_msg = ""
|
||||
if res.status_code == 461:
|
||||
if res.status_code == NOT_FOUND_ORIGIN_IMAGE:
|
||||
error_msg = "请输入正确的图片ID"
|
||||
res_json = res.json()
|
||||
logger.error(f"[MJ] upscale error, msg={res_json.get('message')}, status_code={res.status_code}")
|
||||
logger.error(f"[MJ] operate error, msg={res_json.get('message')}, status_code={res.status_code}")
|
||||
reply = Reply(ReplyType.ERROR, error_msg or "图片生成失败,请稍后再试")
|
||||
return reply
|
||||
|
||||
@@ -241,40 +291,6 @@ class MJBot:
|
||||
if self.tasks.get(task.id):
|
||||
self.tasks[task.id].status = Status.EXPIRED
|
||||
|
||||
async def check_task_async(self, task: MJTask, e_context: EventContext):
|
||||
try:
|
||||
logger.debug(f"[MJ] start check task status, {task}")
|
||||
max_retry_times = 90
|
||||
while max_retry_times > 0:
|
||||
await asyncio.sleep(10)
|
||||
async with aiohttp.ClientSession() as session:
|
||||
url = f"{self.base_url}/tasks/{task.id}"
|
||||
try:
|
||||
async with session.get(url, headers=self.headers) as res:
|
||||
if res.status == 200:
|
||||
res_json = await res.json()
|
||||
logger.debug(f"[MJ] task check res, task_id={task.id}, status={res.status}, "
|
||||
f"data={res_json.get('data')}, thread={threading.current_thread().name}")
|
||||
if res_json.get("data") and res_json.get("data").get("status") == Status.FINISHED.name:
|
||||
# process success res
|
||||
if self.tasks.get(task.id):
|
||||
self.tasks[task.id].status = Status.FINISHED
|
||||
self._process_success_task(task, res_json.get("data"), e_context)
|
||||
return
|
||||
else:
|
||||
res_json = await res.json()
|
||||
logger.warn(f"[MJ] image check error, status_code={res.status}, res={res_json}")
|
||||
max_retry_times -= 20
|
||||
except Exception as e:
|
||||
max_retry_times -= 20
|
||||
logger.warn(e)
|
||||
max_retry_times -= 1
|
||||
logger.warn("[MJ] end from poll")
|
||||
if self.tasks.get(task.id):
|
||||
self.tasks[task.id].status = Status.EXPIRED
|
||||
except Exception as e:
|
||||
logger.error(e)
|
||||
|
||||
def _do_check_task(self, task: MJTask, e_context: EventContext):
|
||||
threading.Thread(target=self.check_task_sync, args=(task, e_context)).start()
|
||||
|
||||
@@ -299,10 +315,17 @@ class MJBot:
|
||||
# send info
|
||||
trigger_prefix = conf().get("plugin_trigger_prefix", "$")
|
||||
text = ""
|
||||
if task.task_type == TaskType.GENERATE:
|
||||
text = f"🎨绘画完成!\nprompt: {task.raw_prompt}\n- - - - - - - - -\n图片ID: {task.img_id}"
|
||||
text += f"\n\n🔎可使用 {trigger_prefix}mju 命令放大指定图片\n"
|
||||
if task.task_type == TaskType.GENERATE or task.task_type == TaskType.VARIATION or task.task_type == TaskType.RESET:
|
||||
text = f"🎨绘画完成!\n"
|
||||
if task.raw_prompt:
|
||||
text += f"prompt: {task.raw_prompt}\n"
|
||||
text += f"- - - - - - - - -\n图片ID: {task.img_id}"
|
||||
text += f"\n\n🔎使用 {trigger_prefix}mju 命令放大图片\n"
|
||||
text += f"例如:\n{trigger_prefix}mju {task.img_id} 1"
|
||||
text += f"\n\n🪄使用 {trigger_prefix}mjv 命令变换图片\n"
|
||||
text += f"例如:\n{trigger_prefix}mjv {task.img_id} 1"
|
||||
text += f"\n\n🔄使用 {trigger_prefix}mjr 命令重新生成图片\n"
|
||||
text += f"例如:\n{trigger_prefix}mjr {task.img_id}"
|
||||
reply = Reply(ReplyType.INFO, text)
|
||||
channel._send(reply, e_context["context"])
|
||||
|
||||
@@ -365,8 +388,9 @@ class MJBot:
|
||||
help_text = "🎨利用Midjourney进行画图\n\n"
|
||||
if not verbose:
|
||||
return help_text
|
||||
help_text += f" - 生成: {trigger_prefix}mj 描述词1, 描述词2.. \n - 放大: {trigger_prefix}mju 图片ID 图片序号\n\n例如:\n\"{trigger_prefix}mj a little cat, white --ar 9:16\"\n\"{trigger_prefix}mju 1105592717188272288 2\""
|
||||
|
||||
help_text += f" - 生成: {trigger_prefix}mj 描述词1, 描述词2.. \n - 放大: {trigger_prefix}mju 图片ID 图片序号\n - 变换: mjv 图片ID 图片序号\n - 重置: mjr 图片ID"
|
||||
help_text += f"\n\n例如:\n\"{trigger_prefix}mj a little cat, white --ar 9:16\"\n\"{trigger_prefix}mju 11055927171882 2\""
|
||||
help_text += f"\n\"{trigger_prefix}mjv 11055927171882 2\"\n\"{trigger_prefix}mjr 11055927171882\""
|
||||
return help_text
|
||||
|
||||
def find_tasks_by_user_id(self, user_id) -> list:
|
||||
|
||||
+1
-1
@@ -19,7 +19,7 @@ class Plugin:
|
||||
# 全局配置不存在 或者 未开启全局配置开关,则获取插件目录下的配置
|
||||
plugin_config_path = os.path.join(self.path, "config.json")
|
||||
if os.path.exists(plugin_config_path):
|
||||
with open(plugin_config_path, "r") as f:
|
||||
with open(plugin_config_path, "r", encoding="utf-8") as f:
|
||||
plugin_conf = json.load(f)
|
||||
logger.debug(f"loading plugin config, plugin_name={self.name}, conf={plugin_conf}")
|
||||
return plugin_conf
|
||||
|
||||
Reference in New Issue
Block a user