mirror of https://github.com/zhayujie/chatgpt-on-wechat.git
synced 2026-05-08 04:11:31 +08:00
Compare commits
20 Commits
| SHA1 |
|---|
| 31ac80a074 |
| c8896450f6 |
| c662fa4c63 |
| db2ee802ca |
| c0616e7efa |
| 01660597e3 |
| c5b549f450 |
| 802d8457bb |
| c3a3df67b0 |
| 5798aeb3cd |
| cc81dd9172 |
| 66a014150b |
| 1da596639f |
| 76614ae9e5 |
| 6ddddffc0f |
| 5e399c46b1 |
| 8309f7cdbe |
| b8cc62ae95 |
| c0eb433fa2 |
| 7f857d66f6 |
@@ -1,10 +1,10 @@
# Introduction

- > This project is an LLM-based intelligent chatbot that can be connected to WeChat, WeCom, WeChat Official Accounts, Feishu, and DingTalk. It can use GPT-3.5/GPT-4.0/Claude/Wenxin Yiyan/iFlytek Spark/Tongyi Qianwen/Gemini/LinkAI/ZhipuAI, handles text, voice, and images, reaches external resources such as the operating system and the internet through plugins, and supports building custom enterprise AI applications on your own knowledge base.
+ > This project is an LLM-based intelligent chatbot that can be connected to WeCom, WeChat Official Accounts, Feishu, and DingTalk. It can use GPT-3.5/GPT-4.0/Claude/Wenxin Yiyan/iFlytek Spark/Tongyi Qianwen/Gemini/LinkAI/ZhipuAI, handles text, voice, and images, reaches external resources such as the operating system and the internet through plugins, and supports building custom enterprise AI applications on your own knowledge base.

The latest version supports the following features:

- - [x] **Multi-platform deployment:** Multiple fully featured deployment options; currently supports personal WeChat, WeChat Official Accounts, WeCom, Feishu, DingTalk, and more
+ - [x] **Multi-platform deployment:** Multiple fully featured deployment options; currently supports WeChat-ecosystem Official Accounts, WeCom applications, Feishu, DingTalk, and more
- [x] **Basic chat:** Smart replies for private and group chats, with multi-turn conversation context memory; supports GPT-3.5, GPT-4, Claude-3, Gemini, Wenxin Yiyan, iFlytek Spark, Tongyi Qianwen, ChatGLM-4
- [x] **Voice capability:** Recognizes voice messages and can reply with text or voice; supports azure, baidu, google, openai (whisper/tts) and other voice models
- [x] **Image capability:** Image generation, image recognition, and image-to-image (e.g., photo restoration); choose among Dall-E-3, stable diffusion, replicate, midjourney, CogView-3, and vision models

@@ -51,7 +51,7 @@ SaaS service, private deployment, stable hosted access, and other modes.

>**2023.08.08:** Integrated the Baidu Wenxin Yiyan model; Midjourney drawing is supported via the [plugin](https://github.com/zhayujie/chatgpt-on-wechat/tree/master/plugins/linkai)

- >**2023.06.12:** Integrated the [LinkAI](https://link-ai.tech/console) platform: create domain knowledge bases online and connect them to WeChat, Official Accounts, and WeCom to build a dedicated customer-service bot. See the [integration docs](https://link-ai.tech/platform/link-app/wechat).
+ >**2023.06.12:** Integrated the [LinkAI](https://link-ai.tech/console) platform: create domain knowledge bases online to build a dedicated customer-service bot. See the [integration docs](https://link-ai.tech/platform/link-app/wechat).

For earlier changelogs see: [archived logs](/docs/version/old-version.md)

@@ -187,7 +187,7 @@ pip3 install -r requirements-optional.txt
python3 app.py  # on Windows this command is usually: python app.py

- Once the QR code is printed in the terminal, scan it with WeChat; when "Start auto replying" is printed, the auto-reply program is running (note: the WeChat account used for login must have completed real-name verification under Pay). After scanning, your account becomes the bot: auto replies can be triggered on the WeChat mobile client via the configured keywords (any friend sending you a message, or you sending a message to a friend), see [#142](https://github.com/zhayujie/chatgpt-on-wechat/issues/142).
+ Once the QR code is printed in the terminal, scan it with WeChat; when "Start auto replying" is printed, the auto-reply program is running (note: the WeChat account used for login must have completed real-name verification under Pay). After scanning, your account becomes the bot: auto replies can be triggered on the mobile client via the configured keywords (any friend sending you a message, or you sending a message to a friend), see [#142](https://github.com/zhayujie/chatgpt-on-wechat/issues/142).

### 2. Server Deployment
+3 -1
@@ -50,7 +50,9 @@ def create_bot(bot_type):
    elif bot_type == const.QWEN:
        from bot.ali.ali_qwen_bot import AliQwenBot
        return AliQwenBot()

+   elif bot_type == const.QWEN_DASHSCOPE:
+       from bot.dashscope.dashscope_bot import DashscopeBot
+       return DashscopeBot()
    elif bot_type == const.GEMINI:
        from bot.gemini.google_gemini_bot import GoogleGeminiBot
        return GoogleGeminiBot()
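As a quick orientation (not part of the diff itself), a minimal sketch of how this new factory branch is reached, assuming the factory lives in `bot/bot_factory.py` as elsewhere in this project; the surrounding call is illustrative only:

```python
# Hypothetical usage sketch: resolve the new DashScope bot type via the factory.
from bot.bot_factory import create_bot
from common import const

bot = create_bot(const.QWEN_DASHSCOPE)  # returns a DashscopeBot instance
```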
@@ -0,0 +1,117 @@
# encoding:utf-8

from bot.bot import Bot
from bot.session_manager import SessionManager
from bridge.context import ContextType
from bridge.reply import Reply, ReplyType
from common.log import logger
from config import conf, load_config
from .dashscope_session import DashscopeSession
import os
import dashscope
from http import HTTPStatus


dashscope_models = {
    "qwen-turbo": dashscope.Generation.Models.qwen_turbo,
    "qwen-plus": dashscope.Generation.Models.qwen_plus,
    "qwen-max": dashscope.Generation.Models.qwen_max,
    "qwen-bailian-v1": dashscope.Generation.Models.bailian_v1
}


# DashScope (Tongyi Qianwen) chat model API
class DashscopeBot(Bot):
    def __init__(self):
        super().__init__()
        self.sessions = SessionManager(DashscopeSession, model=conf().get("model") or "qwen-plus")
        self.model_name = conf().get("model") or "qwen-plus"
        self.api_key = conf().get("dashscope_api_key")
        os.environ["DASHSCOPE_API_KEY"] = self.api_key
        self.client = dashscope.Generation

    def reply(self, query, context=None):
        # acquire reply content
        if context.type == ContextType.TEXT:
            logger.info("[DASHSCOPE] query={}".format(query))

            session_id = context["session_id"]
            reply = None
            clear_memory_commands = conf().get("clear_memory_commands", ["#清除记忆"])
            if query in clear_memory_commands:
                self.sessions.clear_session(session_id)
                reply = Reply(ReplyType.INFO, "记忆已清除")
            elif query == "#清除所有":
                self.sessions.clear_all_session()
                reply = Reply(ReplyType.INFO, "所有人记忆已清除")
            elif query == "#更新配置":
                load_config()
                reply = Reply(ReplyType.INFO, "配置已更新")
            if reply:
                return reply
            session = self.sessions.session_query(query, session_id)
            logger.debug("[DASHSCOPE] session query={}".format(session.messages))

            reply_content = self.reply_text(session)
            logger.debug(
                "[DASHSCOPE] new_query={}, session_id={}, reply_cont={}, completion_tokens={}".format(
                    session.messages,
                    session_id,
                    reply_content["content"],
                    reply_content["completion_tokens"],
                )
            )
            if reply_content["completion_tokens"] == 0 and len(reply_content["content"]) > 0:
                reply = Reply(ReplyType.ERROR, reply_content["content"])
            elif reply_content["completion_tokens"] > 0:
                self.sessions.session_reply(reply_content["content"], session_id, reply_content["total_tokens"])
                reply = Reply(ReplyType.TEXT, reply_content["content"])
            else:
                reply = Reply(ReplyType.ERROR, reply_content["content"])
                logger.debug("[DASHSCOPE] reply {} used 0 tokens.".format(reply_content))
            return reply
        else:
            reply = Reply(ReplyType.ERROR, "Bot不支持处理{}类型的消息".format(context.type))
            return reply

    def reply_text(self, session: DashscopeSession, retry_count=0) -> dict:
        """
        Call the DashScope Generation API to get the answer.
        :param session: a conversation session
        :param retry_count: retry count
        :return: {}
        """
        try:
            dashscope.api_key = self.api_key
            response = self.client.call(
                dashscope_models[self.model_name],
                messages=session.messages,
                result_format="message"
            )
            if response.status_code == HTTPStatus.OK:
                content = response.output.choices[0]["message"]["content"]
                return {
                    "total_tokens": response.usage["total_tokens"],
                    "completion_tokens": response.usage["output_tokens"],
                    "content": content,
                }
            else:
                logger.error('Request id: %s, Status code: %s, error code: %s, error message: %s' % (
                    response.request_id, response.status_code,
                    response.code, response.message
                ))
                need_retry = retry_count < 2
                result = {"completion_tokens": 0, "content": "我现在有点累了,等会再来吧"}
                if need_retry:
                    return self.reply_text(session, retry_count + 1)
                else:
                    return result
        except Exception as e:
            logger.exception(e)
            need_retry = retry_count < 2
            result = {"completion_tokens": 0, "content": "我现在有点累了,等会再来吧"}
            if need_retry:
                return self.reply_text(session, retry_count + 1)
            else:
                return result
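For a standalone sanity check of the SDK call that `reply_text` makes, here is a minimal sketch using only calls that appear in the new file above; the API key and prompt are placeholders:

```python
# Standalone sketch mirroring DashscopeBot.reply_text above; key and prompt are placeholders.
import dashscope
from http import HTTPStatus

dashscope.api_key = "sk-..."  # placeholder
response = dashscope.Generation.call(
    dashscope.Generation.Models.qwen_plus,
    messages=[{"role": "user", "content": "你好"}],
    result_format="message",
)
if response.status_code == HTTPStatus.OK:
    print(response.output.choices[0]["message"]["content"])
    print(response.usage["total_tokens"], response.usage["output_tokens"])
else:
    print(response.code, response.message)
```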
@@ -0,0 +1,51 @@
from bot.session_manager import Session
from common.log import logger


class DashscopeSession(Session):
    def __init__(self, session_id, system_prompt=None, model="qwen-turbo"):
        super().__init__(session_id)
        self.reset()

    def discard_exceeding(self, max_tokens, cur_tokens=None):
        precise = True
        try:
            cur_tokens = self.calc_tokens()
        except Exception as e:
            precise = False
            if cur_tokens is None:
                raise e
            logger.debug("Exception when counting tokens precisely for query: {}".format(e))
        while cur_tokens > max_tokens:
            if len(self.messages) > 2:
                self.messages.pop(1)
            elif len(self.messages) == 2 and self.messages[1]["role"] == "assistant":
                self.messages.pop(1)
                if precise:
                    cur_tokens = self.calc_tokens()
                else:
                    cur_tokens = cur_tokens - max_tokens
                break
            elif len(self.messages) == 2 and self.messages[1]["role"] == "user":
                logger.warn("user message exceed max_tokens. total_tokens={}".format(cur_tokens))
                break
            else:
                logger.debug("max_tokens={}, total_tokens={}, len(messages)={}".format(max_tokens, cur_tokens,
                                                                                       len(self.messages)))
                break
            if precise:
                cur_tokens = self.calc_tokens()
            else:
                cur_tokens = cur_tokens - max_tokens
        return cur_tokens

    def calc_tokens(self):
        return num_tokens_from_messages(self.messages)


def num_tokens_from_messages(messages):
    # Rough estimate only; for the exact counting rules see:
    # https://help.aliyun.com/zh/dashscope/developer-reference/token-api?spm=a2c4g.11186623.0.0.4d8b12b0BkP3K9
    tokens = 0
    for msg in messages:
        tokens += len(msg["content"])
    return tokens
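To make the character-count approximation concrete, a tiny illustrative check (values are made up; real DashScope token counts differ):

```python
# Illustrative only: num_tokens_from_messages counts characters, not model tokens.
msgs = [
    {"role": "user", "content": "你好"},                    # 2 chars
    {"role": "assistant", "content": "你好,有什么可以帮你"},  # 10 chars
]
assert num_tokens_from_messages(msgs) == 12
```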
@@ -7,7 +7,6 @@ import requests
import config
from bot.bot import Bot
from bot.chatgpt.chat_gpt_session import ChatGPTSession
from bot.gemini.google_gemini_bot import GoogleGeminiBot
from bot.session_manager import SessionManager
from bridge.context import Context, ContextType
from bridge.reply import Reply, ReplyType
@@ -7,6 +7,8 @@ class ZhipuAISession(Session):
        super().__init__(session_id, system_prompt)
        self.model = model
        self.reset()
+       if not system_prompt:
+           logger.warn("[ZhiPu] `character_desc` can not be empty")

    def discard_exceeding(self, max_tokens, cur_tokens=None):
        precise = True
@@ -30,6 +30,8 @@ class Bridge(object):
            self.btype["chat"] = const.XUNFEI
        if model_type in [const.QWEN]:
            self.btype["chat"] = const.QWEN
+       if model_type in [const.QWEN_TURBO, const.QWEN_PLUS, const.QWEN_MAX]:
+           self.btype["chat"] = const.QWEN_DASHSCOPE
        if model_type in [const.GEMINI]:
            self.btype["chat"] = const.GEMINI
        if model_type in [const.ZHIPU_AI]:
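Taken together with the new constants and the `dashscope_api_key` setting added later in this compare, routing a chat through the DashScope bot is purely a `config.json` choice; a minimal illustrative sketch (values are placeholders):

```json
{
  "model": "qwen-plus",
  "dashscope_api_key": "sk-..."
}
```

With `model` set to one of `qwen-turbo`/`qwen-plus`/`qwen-max`, the branch above selects `const.QWEN_DASHSCOPE`.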
@@ -4,7 +4,6 @@ import threading
import time
from asyncio import CancelledError
from concurrent.futures import Future, ThreadPoolExecutor
from concurrent import futures

from bridge.context import *
from bridge.reply import *

@@ -75,6 +74,7 @@ class ChatChannel(Channel):
                ):
                    session_id = group_id
                else:
+                   logger.debug(f"No need reply, groupName not in whitelist, group_name={group_name}")
                    return None
            context["session_id"] = session_id
            context["receiver"] = group_id
@@ -32,13 +32,13 @@ class DingTalkMessage(ChatMessage):
            # DingTalk can recognize voice natively, so extract the recognized text here and handle it as a text message
            self.content = event.extensions['content']['recognition'].strip()
            self.ctype = ContextType.TEXT
-       self.from_user_id = event.sender_id
+       if self.is_group:
+           self.from_user_id = event.conversation_id
+           self.actual_user_id = event.sender_id
+       else:
+           self.from_user_id = event.sender_id
        self.to_user_id = event.chatbot_user_id
        self.other_user_nickname = event.conversation_title

        user_id = event.sender_id
        nickname = event.sender_nick
@@ -96,7 +96,7 @@ def qrCallback(uuid, status, qrcode):
        print(qr_api4)
        print(qr_api2)
        print(qr_api1)
-       _send_qr_code([qr_api1, qr_api2, qr_api3, qr_api4])
+       _send_qr_code([qr_api3, qr_api4, qr_api2, qr_api1])
        qr = qrcode.QRCode(border=1)
        qr.add_data(url)
        qr.make(fit=True)
@@ -233,6 +233,7 @@ class WechatChannel(ChatChannel):
            logger.info("[WX] sendImage url={}, receiver={}".format(img_url, receiver))
        elif reply.type == ReplyType.IMAGE:  # read the image from a file
            image_storage = reply.content
            image_storage.seek(0)
            itchat.send_image(image_storage, toUserName=receiver)
            logger.info("[WX] sendImage, receiver={}".format(receiver))
+       elif reply.type == ReplyType.FILE:  # new: file reply type
+8 -1
@@ -8,6 +8,12 @@ LINKAI = "linkai"
CLAUDEAI = "claude"
CLAUDEAPI = "claudeAPI"
QWEN = "qwen"

QWEN_DASHSCOPE = "dashscope"
QWEN_TURBO = "qwen-turbo"
QWEN_PLUS = "qwen-plus"
QWEN_MAX = "qwen-max"

GEMINI = "gemini"
ZHIPU_AI = "glm-4"
MOONSHOT = "moonshot"

@@ -24,7 +30,8 @@ TTS_1 = "tts-1"
TTS_1_HD = "tts-1-hd"

MODEL_LIST = ["gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-4", "wenxin", "wenxin-4", "xunfei", "claude", "claude-3-opus-20240229", "gpt-4-turbo",
-             "gpt-4-turbo-preview", "gpt-4-1106-preview", GPT4_TURBO_PREVIEW, QWEN, GEMINI, ZHIPU_AI, MOONSHOT]
+             "gpt-4-turbo-preview", "gpt-4-1106-preview", GPT4_TURBO_PREVIEW, QWEN, GEMINI, ZHIPU_AI, MOONSHOT,
+             QWEN_TURBO, QWEN_PLUS, QWEN_MAX]

# channel
FEISHU = "feishu"
+56 -16
@@ -2,12 +2,14 @@ from bridge.context import Context, ContextType
from bridge.reply import Reply, ReplyType
from common.log import logger
from linkai import LinkAIClient, PushMsg
- from config import conf, pconf, plugin_config
+ from config import conf, pconf, plugin_config, available_setting
from plugins import PluginManager
import time


chat_client: LinkAIClient


class ChatClient(LinkAIClient):
    def __init__(self, api_key, host, channel):
        super().__init__(api_key, host)

@@ -27,29 +29,67 @@ class ChatClient(LinkAIClient):
    def on_config(self, config: dict):
        if not self.client_id:
            return
-       logger.info(f"从控制台加载配置: {config}")
+       logger.info(f"[LinkAI] 从客户端管理加载远程配置: {config}")
        if config.get("enabled") != "Y":
            return

        local_config = conf()
-       for key in local_config.keys():
-           if config.get(key) is not None:
+       for key in config.keys():
+           if key in available_setting and config.get(key) is not None:
                local_config[key] = config.get(key)
-       if config.get("reply_voice_mode"):
-           if config.get("reply_voice_mode") == "voice_reply_voice":
+       # voice settings
+       reply_voice_mode = config.get("reply_voice_mode")
+       if reply_voice_mode:
+           if reply_voice_mode == "voice_reply_voice":
                local_config["voice_reply_voice"] = True
-           elif config.get("reply_voice_mode") == "always_reply_voice":
+           elif reply_voice_mode == "always_reply_voice":
                local_config["always_reply_voice"] = True
-       # if config.get("admin_password") and plugin_config["Godcmd"]:
-       #     plugin_config["Godcmd"]["password"] = config.get("admin_password")
-       #     PluginManager().instances["Godcmd"].reload()
-       # if config.get("group_app_map") and pconf("linkai"):
-       #     local_group_map = {}
-       #     for mapping in config.get("group_app_map"):
-       #         local_group_map[mapping.get("group_name")] = mapping.get("app_code")
-       #     pconf("linkai")["group_app_map"] = local_group_map
-       #     PluginManager().instances["linkai"].reload()
+       if config.get("admin_password") and plugin_config["Godcmd"]:
+           plugin_config["Godcmd"]["password"] = config.get("admin_password")
+           PluginManager().instances["GODCMD"].reload()
+       if config.get("group_app_map") and pconf("linkai"):
+           local_group_map = {}
+           for mapping in config.get("group_app_map"):
+               local_group_map[mapping.get("group_name")] = mapping.get("app_code")
+           pconf("linkai")["group_app_map"] = local_group_map
+           PluginManager().instances["LINKAI"].reload()


def start(channel):
    global chat_client
    chat_client = ChatClient(api_key=conf().get("linkai_api_key"),
                             host="link-ai.chat", channel=channel)
    chat_client.config = _build_config()
    chat_client.start()
    time.sleep(1.5)
    if chat_client.client_id:
        logger.info("[LinkAI] 可前往控制台进行线上登录和配置:https://link-ai.tech/console/clients")


def _build_config():
    local_conf = conf()
    config = {
        "linkai_app_code": local_conf.get("linkai_app_code"),
        "single_chat_prefix": local_conf.get("single_chat_prefix"),
        "single_chat_reply_prefix": local_conf.get("single_chat_reply_prefix"),
        "single_chat_reply_suffix": local_conf.get("single_chat_reply_suffix"),
        "group_chat_prefix": local_conf.get("group_chat_prefix"),
        "group_chat_reply_prefix": local_conf.get("group_chat_reply_prefix"),
        "group_chat_reply_suffix": local_conf.get("group_chat_reply_suffix"),
        "group_name_white_list": local_conf.get("group_name_white_list"),
        "nick_name_black_list": local_conf.get("nick_name_black_list"),
        "speech_recognition": "Y" if local_conf.get("speech_recognition") else "N",
        "text_to_image": local_conf.get("text_to_image"),
        "image_create_prefix": local_conf.get("image_create_prefix")
    }
    if local_conf.get("always_reply_voice"):
        config["reply_voice_mode"] = "always_reply_voice"
    elif local_conf.get("voice_reply_voice"):
        config["reply_voice_mode"] = "voice_reply_voice"
    if pconf("linkai"):
        config["group_app_map"] = pconf("linkai").get("group_app_map")
    if plugin_config.get("Godcmd"):
        config["admin_password"] = plugin_config.get("Godcmd").get("password")
    return config
@@ -75,6 +75,8 @@ available_setting = {
    "qwen_agent_key": "",
    "qwen_app_id": "",
    "qwen_node_id": "",  # id used by the flow-orchestration model; if qwen_node_id is not used, be sure to keep this an empty string
+   # Alibaba Lingji (DashScope) model api key
+   "dashscope_api_key": "",
    # Google Gemini Api Key
    "gemini_api_key": "",
    # general wework settings

@@ -258,6 +260,8 @@ def load_config():
    config.load_user_datas()


def get_root():
    return os.path.dirname(os.path.abspath(__file__))
@@ -0,0 +1,41 @@
## Plugin Description

Customize the prompts used for group-join welcome, group "pat-pat" (拍一拍), and group-exit messages as needed, and optionally set a fixed welcome message per group.

This plugin also serves as a sample for developing your own custom plugins; see the [plugin development guide](https://github.com/zhayujie/chatgpt-on-wechat/tree/master/plugins).

## Plugin Configuration

Copy the `config.json.template` in the `plugins/hello` directory to `config.json`, which is the file that actually takes effect. (If no `config.json` is provided, the settings in the `config.json.template` template are used by default.)

The configuration options are described below:

```bash
{
  "group_welc_fixed_msg": {                       ## fixed welcome messages for specific groups
    "群聊1": "群聊1的固定欢迎语",
    "群聊2": "群聊2的固定欢迎语"
  },

  "group_welc_prompt": "请你随机使用一种风格说一句问候语来欢迎新用户\"{nickname}\"加入群聊。",  ## prompt for the random group-join welcome

  "group_exit_prompt": "请你随机使用一种风格跟其他群用户说他违反规则\"{nickname}\"退出群聊。",  ## prompt for the group-exit message

  "patpat_prompt": "请你随机使用一种风格介绍你自己,并告诉用户输入#help可以查看帮助信息。",  ## prompt for the in-group pat-pat

  "use_character_desc": false  ## whether the Hello plugin uses the LinkAI app's system persona
}
```

Notes:

- To set a global fixed welcome message for users joining any group, add `"group_welcome_msg": ""` to the `config.json` in the ***project root directory***, see [#1482](https://github.com/zhayujie/chatgpt-on-wechat/pull/1482)
- To set a fixed welcome message per group, map group names to messages in `"group_welc_fixed_msg": {}`; this takes precedence over the global fixed welcome message
- If neither of the two options above is configured, a random welcome message is used; to adjust its style, language, etc., edit `"group_welc_prompt"`
- If you use the LinkAI service and want the random welcome to reflect the LinkAI app's persona, set `"use_character_desc": true`
- The actual `config.json` must be valid JSON and must not contain '#' or the comments after it
- For `docker` deployments, you can map `plugins/config.json` into the container to configure plugins, see the [docs](https://github.com/zhayujie/chatgpt-on-wechat#3-%E6%8F%92%E4%BB%B6%E4%BD%BF%E7%94%A8)
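For the first note above, a minimal sketch of the global setting in the ***project root*** `config.json` (the message text is illustrative):

```json
{
  "group_welcome_msg": "欢迎加入群聊,输入 #help 查看使用说明"
}
```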
@@ -0,0 +1,14 @@
{
  "group_welc_fixed_msg": {
    "群聊1": "群聊1的固定欢迎语",
    "群聊2": "群聊2的固定欢迎语"
  },

  "group_welc_prompt": "请你随机使用一种风格说一句问候语来欢迎新用户\"{nickname}\"加入群聊。",

  "group_exit_prompt": "请你随机使用一种风格跟其他群用户说他违反规则\"{nickname}\"退出群聊。",

  "patpat_prompt": "请你随机使用一种风格介绍你自己,并告诉用户输入#help可以查看帮助信息。",

  "use_character_desc": false
}
+41 -12
@@ -17,12 +17,29 @@ from config import conf
    version="0.1",
    author="lanvent",
)


class Hello(Plugin):

    group_welc_prompt = "请你随机使用一种风格说一句问候语来欢迎新用户\"{nickname}\"加入群聊。"
    group_exit_prompt = "请你随机使用一种风格跟其他群用户说他违反规则\"{nickname}\"退出群聊。"
    patpat_prompt = "请你随机使用一种风格介绍你自己,并告诉用户输入#help可以查看帮助信息。"

    def __init__(self):
        super().__init__()
-       self.handlers[Event.ON_HANDLE_CONTEXT] = self.on_handle_context
-       logger.info("[Hello] inited")
-       self.config = super().load_config()
+       try:
+           self.config = super().load_config()
+           if not self.config:
+               self.config = self._load_config_template()
+           self.group_welc_fixed_msg = self.config.get("group_welc_fixed_msg", {})
+           self.group_welc_prompt = self.config.get("group_welc_prompt", self.group_welc_prompt)
+           self.group_exit_prompt = self.config.get("group_exit_prompt", self.group_exit_prompt)
+           self.patpat_prompt = self.config.get("patpat_prompt", self.patpat_prompt)
+           logger.info("[Hello] inited")
+           self.handlers[Event.ON_HANDLE_CONTEXT] = self.on_handle_context
+       except Exception as e:
+           logger.error(f"[Hello] init exception: {e}")
+           raise Exception("[Hello] init failed, ignore")

    def on_handle_context(self, e_context: EventContext):
        if e_context["context"].type not in [

@@ -32,17 +49,21 @@ class Hello(Plugin):
            ContextType.EXIT_GROUP
        ]:
            return
        msg: ChatMessage = e_context["context"]["msg"]
        group_name = msg.from_user_nickname
        if e_context["context"].type == ContextType.JOIN_GROUP:
-           if "group_welcome_msg" in conf():
+           if "group_welcome_msg" in conf() or group_name in self.group_welc_fixed_msg:
                reply = Reply()
                reply.type = ReplyType.TEXT
-               reply.content = conf().get("group_welcome_msg", "")
+               if group_name in self.group_welc_fixed_msg:
+                   reply.content = self.group_welc_fixed_msg.get(group_name, "")
+               else:
+                   reply.content = conf().get("group_welcome_msg", "")
                e_context["reply"] = reply
                e_context.action = EventAction.BREAK_PASS  # end the event and skip the default context handling
                return
            e_context["context"].type = ContextType.TEXT
-           msg: ChatMessage = e_context["context"]["msg"]
-           e_context["context"].content = f'请你随机使用一种风格说一句问候语来欢迎新用户"{msg.actual_user_nickname}"加入群聊。'
+           e_context["context"].content = self.group_welc_prompt.format(nickname=msg.actual_user_nickname)
            e_context.action = EventAction.BREAK  # end the event, then fall through to the default handling
            if not self.config or not self.config.get("use_character_desc"):
                e_context["context"]["generate_breaked_by"] = EventAction.BREAK

@@ -51,8 +72,7 @@ class Hello(Plugin):
        if e_context["context"].type == ContextType.EXIT_GROUP:
            if conf().get("group_chat_exit_group"):
                e_context["context"].type = ContextType.TEXT
-               msg: ChatMessage = e_context["context"]["msg"]
-               e_context["context"].content = f'请你随机使用一种风格跟其他群用户说他违反规则"{msg.actual_user_nickname}"退出群聊。'
+               e_context["context"].content = self.group_exit_prompt.format(nickname=msg.actual_user_nickname)
                e_context.action = EventAction.BREAK  # end the event, then fall through to the default handling
                return
            e_context.action = EventAction.BREAK

@@ -60,8 +80,7 @@ class Hello(Plugin):

        if e_context["context"].type == ContextType.PATPAT:
            e_context["context"].type = ContextType.TEXT
-           msg: ChatMessage = e_context["context"]["msg"]
-           e_context["context"].content = f"请你随机使用一种风格介绍你自己,并告诉用户输入#help可以查看帮助信息。"
+           e_context["context"].content = self.patpat_prompt
            e_context.action = EventAction.BREAK  # end the event, then fall through to the default handling
            if not self.config or not self.config.get("use_character_desc"):
                e_context["context"]["generate_breaked_by"] = EventAction.BREAK

@@ -72,7 +91,6 @@ class Hello(Plugin):
        if content == "Hello":
            reply = Reply()
            reply.type = ReplyType.TEXT
-           msg: ChatMessage = e_context["context"]["msg"]
            if e_context["context"]["isgroup"]:
                reply.content = f"Hello, {msg.actual_user_nickname} from {msg.from_user_nickname}"
            else:

@@ -96,3 +114,14 @@ class Hello(Plugin):
    def get_help_text(self, **kwargs):
        help_text = "输入Hello,我会回复你的名字\n输入End,我会回复你世界的图片\n"
        return help_text

+   def _load_config_template(self):
+       logger.debug("No Hello plugin config.json, use plugins/hello/config.json.template")
+       try:
+           plugin_config_path = os.path.join(self.path, "config.json.template")
+           if os.path.exists(plugin_config_path):
+               with open(plugin_config_path, "r", encoding="utf-8") as f:
+                   plugin_conf = json.load(f)
+                   return plugin_conf
+       except Exception as e:
+           logger.exception(e)
@@ -10,6 +10,7 @@ from common import const
import os
from .utils import Util


@plugins.register(
    name="linkai",
    desc="A plugin that supports knowledge base and midjourney drawing.",

@@ -32,7 +33,6 @@ class LinkAI(Plugin):
        self.sum_config = self.config.get("summary")
        logger.info(f"[LinkAI] inited, config={self.config}")

    def on_handle_context(self, e_context: EventContext):
        """
        Message-handling logic

@@ -42,7 +42,8 @@ class LinkAI(Plugin):
            return

        context = e_context['context']
-       if context.type not in [ContextType.TEXT, ContextType.IMAGE, ContextType.IMAGE_CREATE, ContextType.FILE, ContextType.SHARING]:
+       if context.type not in [ContextType.TEXT, ContextType.IMAGE, ContextType.IMAGE_CREATE, ContextType.FILE,
+                               ContextType.SHARING]:
            # filter content no need solve
            return

@@ -76,7 +77,8 @@ class LinkAI(Plugin):
            if not res:
                _set_reply_text("因为神秘力量无法获取文章内容,请稍后再试吧~", e_context, level=ReplyType.TEXT)
                return
-           _set_reply_text(res.get("summary") + "\n\n💬 发送 \"开启对话\" 可以开启与文章内容的对话", e_context, level=ReplyType.TEXT)
+           _set_reply_text(res.get("summary") + "\n\n💬 发送 \"开启对话\" 可以开启与文章内容的对话", e_context,
+                           level=ReplyType.TEXT)
            USER_FILE_MAP[_find_user_id(context) + "-sum_id"] = res.get("summary_id")
            return

@@ -99,7 +101,8 @@ class LinkAI(Plugin):
                _set_reply_text("开启对话失败,请稍后再试吧", e_context)
                return
            USER_FILE_MAP[_find_user_id(context) + "-file_id"] = res.get("file_id")
-           _set_reply_text("💡你可以问我关于这篇文章的任何问题,例如:\n\n" + res.get("questions") + "\n\n发送 \"退出对话\" 可以关闭与文章的对话", e_context, level=ReplyType.TEXT)
+           _set_reply_text("💡你可以问我关于这篇文章的任何问题,例如:\n\n" + res.get(
+               "questions") + "\n\n发送 \"退出对话\" 可以关闭与文章的对话", e_context, level=ReplyType.TEXT)
            return

        if context.type == ContextType.TEXT and context.content == "退出对话" and _find_file_id(context):

@@ -117,12 +120,10 @@ class LinkAI(Plugin):
            e_context.action = EventAction.BREAK_PASS
            return

        if self._is_chat_task(e_context):
            # handle the text chat task
            self._process_chat_task(e_context)

    # plugin management
    def _process_admin_cmd(self, e_context: EventContext):
        context = e_context['context']

@@ -177,7 +178,9 @@ class LinkAI(Plugin):
            tips_text = "关闭"
            is_open = False
        if not self.sum_config:
-           _set_reply_text(f"插件未启用summary功能,请参考以下链添加插件配置\n\nhttps://github.com/zhayujie/chatgpt-on-wechat/blob/master/plugins/linkai/README.md", e_context, level=ReplyType.INFO)
+           _set_reply_text(
+               f"插件未启用summary功能,请参考以下链添加插件配置\n\nhttps://github.com/zhayujie/chatgpt-on-wechat/blob/master/plugins/linkai/README.md",
+               e_context, level=ReplyType.INFO)
        else:
            self.sum_config["enabled"] = is_open
            _set_reply_text(f"文章总结功能{tips_text}", e_context, level=ReplyType.INFO)

@@ -254,6 +257,9 @@ class LinkAI(Plugin):
        except Exception as e:
            logger.exception(e)

+   def reload(self):
+       self.config = super().load_config()


def _send_info(e_context: EventContext, content: str):
    reply = Reply(ReplyType.TEXT, content)

@@ -273,15 +279,19 @@ def _set_reply_text(content: str, e_context: EventContext, level: ReplyType = Re
    e_context["reply"] = reply
    e_context.action = EventAction.BREAK_PASS


def _get_trigger_prefix():
    return conf().get("plugin_trigger_prefix", "$")


def _find_sum_id(context):
    return USER_FILE_MAP.get(_find_user_id(context) + "-sum_id")


def _find_file_id(context):
    user_id = _find_user_id(context)
    if user_id:
        return USER_FILE_MAP.get(user_id + "-file_id")


USER_FILE_MAP = ExpiredDict(conf().get("expires_in_seconds") or 60 * 30)
@@ -10,6 +10,7 @@ azure-cognitiveservices-speech # azure voice
edge-tts # edge-tts
numpy<=1.24.2
langid # language detect
+elevenlabs==1.0.3 # elevenlabs TTS

#install plugin
dulwich

@@ -35,11 +36,11 @@ broadscope_bailian
# google
google-generativeai

# linkai
linkai>=0.0.3.5

# dingtalk
dingtalk_stream

# zhipuai
zhipuai>=2.0.1

+# tongyi qwen new sdk
+dashscope
+2 -1
@@ -6,4 +6,5 @@ requests>=2.28.2
chardet>=5.1.0
Pillow
pre-commit
web.py
+linkai>=0.0.5.0
@@ -6,7 +6,7 @@ from common.log import logger
try:
    import pysilk
except ImportError:
-   logger.warn("import pysilk failed, wechaty voice message will not be supported.")
+   logger.debug("import pysilk failed, wechaty voice message will not be supported.")

from pydub import AudioSegment
@@ -1,7 +1,7 @@
import time

-from elevenlabs import set_api_key,generate
+from elevenlabs.client import ElevenLabs
+from elevenlabs import save
from bridge.reply import Reply, ReplyType
from common.log import logger
from common.tmp_dir import TmpDir

@@ -9,7 +9,7 @@ from voice.voice import Voice
from config import conf

XI_API_KEY = conf().get("xi_api_key")
-set_api_key(XI_API_KEY)
+client = ElevenLabs(api_key=XI_API_KEY)
name = conf().get("xi_voice_id")

class ElevenLabsVoice(Voice):

@@ -21,13 +21,12 @@ class ElevenLabsVoice(Voice):
        pass

    def textToVoice(self, text):
-       audio = generate(
+       audio = client.generate(
            text=text,
            voice=name,
-           model='eleven_multilingual_v1'
+           model='eleven_multilingual_v2'
        )
        fileName = TmpDir().path() + "reply-" + str(int(time.time())) + "-" + str(hash(text) & 0x7FFFFFFF) + ".mp3"
-       with open(fileName, "wb") as f:
-           f.write(audio)
+       save(audio, fileName)
        logger.info("[ElevenLabs] textToVoice text={} voice file name={}".format(text, fileName))
        return Reply(ReplyType.VOICE, fileName)
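For reference, the migrated elevenlabs 1.x flow above as a standalone hedged sketch; the API key and voice id are placeholders, while `ElevenLabs`, `client.generate`, and `save` are exactly the calls shown in the diff:

```python
# Standalone sketch of the elevenlabs 1.x client flow used above; key and voice are placeholders.
from elevenlabs.client import ElevenLabs
from elevenlabs import save

client = ElevenLabs(api_key="xi-...")  # placeholder key
audio = client.generate(text="hello", voice="your-voice-id", model="eleven_multilingual_v2")
save(audio, "reply.mp3")
```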