mirror of https://github.com/zhayujie/chatgpt-on-wechat.git
synced 2026-05-07 11:59:23 +08:00
Compare commits
14 Commits
| SHA1 |
|---|
| cad3b691a9 |
| bac21426d3 |
| c4a35314cd |
| 7090722565 |
| 6d972c7c18 |
| 6961a88feb |
| c41ec13984 |
| ca8e06e562 |
| 200cd33a8e |
| 1da7991c65 |
| fdfb7e369a |
| c2b01cc957 |
| 5de8e94bb4 |
| 7a2c15d912 |
@@ -5,7 +5,7 @@

The latest version supports the following features:

- ✅ **Multi-platform deployment:** Several fully featured deployment options are available, currently including WeChat Official Accounts, WeCom (enterprise WeChat) apps, Feishu, and DingTalk
- ✅ **Basic chat:** Smart replies in private and group chats with multi-turn context memory, supporting GPT-3.5, GPT-4, GPT-4o, Claude-3, Gemini, Wenxin Yiyan (ERNIE), iFlytek Spark, Tongyi Qianwen, ChatGLM-4, and Kimi (Moonshot)
- ✅ **Basic chat:** Smart replies in private and group chats with multi-turn context memory, supporting GPT-3.5, GPT-4, GPT-4o, Claude-3, Gemini, Wenxin Yiyan (ERNIE), iFlytek Spark, Tongyi Qianwen, ChatGLM-4, Kimi (Moonshot), and MiniMax
- ✅ **Voice capability:** Voice messages can be recognized and answered with text or voice, supporting azure, baidu, google, and openai (whisper/tts) voice models
- ✅ **Image capability:** Image generation, image recognition, and image-to-image (e.g. photo restoration), with Dall-E-3, stable diffusion, replicate, midjourney, CogView-3, and vision models to choose from
- ✅ **Rich plugins:** Personalized plugin extensions are supported; existing plugins include multi-role switching, text adventure, sensitive-word filtering, chat-log summarization, document summarization and Q&A, web search, and more
@@ -42,6 +42,8 @@

# 🏷 Changelog

>**2024.06.20:** [Version 1.6.7](https://github.com/zhayujie/chatgpt-on-wechat/releases/tag/1.6.7): MiniMax model, workflow image input, more complete model list
>
>**2024.06.04:** [Version 1.6.6](https://github.com/zhayujie/chatgpt-on-wechat/releases/tag/1.6.6) and [Version 1.6.5](https://github.com/zhayujie/chatgpt-on-wechat/releases/tag/1.6.5): gpt-4o model, DingTalk streaming cards, iFlytek speech recognition/synthesis

>**2024.04.26:** [Version 1.6.0](https://github.com/zhayujie/chatgpt-on-wechat/releases/tag/1.6.0): added Kimi integration, gpt-4-turbo version upgrade, fixes for file summarization and speech recognition
@@ -80,7 +82,7 @@

> The default chat model is openai's gpt-3.5-turbo, billed at roughly $0.002 per 1000 tokens (about 750 English words or 500 Chinese characters, counting both request and reply); image generation uses the DALL·E model at about $0.016 per image.

The project also supports the LinkAI interface, which needs no proxy and offers models such as Wenxin, iFlytek, GPT-3, and GPT-4, plus customized knowledge bases, web search, MJ drawing, and document summarization and chat. A single config change switches to it; see the [integration docs](https://link-ai.tech/platform/link-app/wechat).
The project also supports the LinkAI interface, which needs no proxy and offers models such as Kimi, Wenxin, iFlytek, GPT-3.5, and GPT-4o, plus customized knowledge bases, web search, MJ drawing, document summarization, and workflows. A single config change enables it; see the [integration docs](https://link-ai.tech/platform/link-app/wechat).

### 2. Runtime Environment
@@ -167,7 +169,7 @@ pip3 install -r requirements-optional.txt

**4. Other settings**

+ `model`: model name; currently supports `gpt-3.5-turbo`, `gpt-4o`, `gpt-4-turbo`, `gpt-4`, `wenxin`, `claude`, `gemini`, `glm-4`, `xunfei`, `moonshot`
+ `model`: model name; currently supports `gpt-3.5-turbo`, `gpt-4o`, `gpt-4-turbo`, `gpt-4`, `wenxin`, `claude`, `gemini`, `glm-4`, `xunfei`, `moonshot`, etc.; see [common/const.py](https://github.com/zhayujie/chatgpt-on-wechat/blob/master/common/const.py) for the full list of model names
+ `temperature`, `frequency_penalty`, `presence_penalty`: Chat API parameters; see the [OpenAI API reference](https://platform.openai.com/docs/api-reference/chat) for details
+ `proxy`: the `openai` endpoint is currently unreachable from mainland China, so the address of a proxy client needs to be configured; see [#351](https://github.com/zhayujie/chatgpt-on-wechat/issues/351)
+ For image generation, besides meeting the private/group trigger conditions, an extra keyword prefix is required; this corresponds to the `image_create_prefix` setting (see the example sketched below)
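A minimal, illustrative `config.json` sketch exercising the settings above (the key names match the `config.py` defaults shown later in this diff; all values are placeholders, not recommendations):

```json
{
  "model": "gpt-3.5-turbo",
  "temperature": 0.7,
  "proxy": "127.0.0.1:7890",
  "image_create_prefix": ["画", "看", "找"]
}
```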
@@ -1,6 +1,8 @@
# encoding:utf-8

import requests, json
import requests
import json
from common import const
from bot.bot import Bot
from bot.session_manager import SessionManager
from bridge.context import ContextType
@@ -16,9 +18,15 @@ class BaiduWenxinBot(Bot):

    def __init__(self):
        super().__init__()
        wenxin_model = conf().get("baidu_wenxin_model") or "eb-instant"
        if conf().get("model") and conf().get("model") == "wenxin-4":
            wenxin_model = "completions_pro"
        wenxin_model = conf().get("baidu_wenxin_model")
        if wenxin_model is not None:
            wenxin_model = conf().get("baidu_wenxin_model") or "eb-instant"
        else:
            if conf().get("model") and conf().get("model") == const.WEN_XIN:
                wenxin_model = "completions"
            elif conf().get("model") and conf().get("model") == const.WEN_XIN_4:
                wenxin_model = "completions_pro"

        self.sessions = SessionManager(BaiduWenxinSession, model=wenxin_model)

    def reply(self, query, context=None):
+4 -1
@@ -2,7 +2,6 @@
channel factory
"""
from common import const
from common.log import logger


def create_bot(bot_type):
@@ -64,6 +63,10 @@ def create_bot(bot_type):
    elif bot_type == const.MOONSHOT:
        from bot.moonshot.moonshot_bot import MoonshotBot
        return MoonshotBot()

    elif bot_type == const.MiniMax:
        from bot.minimax.minimax_bot import MinimaxBot
        return MinimaxBot()

    raise RuntimeError
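A small, hypothetical usage sketch of the factory branch added above (normally the bridge performs this dispatch; it is called directly here only to show the flow):

```python
from common import const
from bot.bot_factory import create_bot

# With "model": "abab6.5-chat" in config.json, the bridge (see the bridge.py
# hunk below) resolves the chat bot type to const.MiniMax, and the new elif
# branch above returns a MinimaxBot instance.
bot = create_bot(const.MiniMax)
```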
@@ -0,0 +1,151 @@
# encoding:utf-8

import time

import openai
import openai.error
from bot.bot import Bot
from bot.minimax.minimax_session import MinimaxSession
from bot.session_manager import SessionManager
from bridge.context import Context, ContextType
from bridge.reply import Reply, ReplyType
from common.log import logger
from config import conf, load_config
from bot.chatgpt.chat_gpt_session import ChatGPTSession
import requests
from common import const


# MiniMax chat model API
class MinimaxBot(Bot):
    def __init__(self):
        super().__init__()
        self.args = {
            "model": conf().get("model") or "abab6.5",  # name of the chat model
            "temperature": conf().get("temperature", 0.3),  # if set, must be in [0, 1]; 0.3 is recommended for good results
            "top_p": conf().get("top_p", 0.95),  # use the default value
        }
        self.api_key = conf().get("Minimax_api_key")
        self.group_id = conf().get("Minimax_group_id")
        self.base_url = conf().get("Minimax_base_url", f"https://api.minimax.chat/v1/text/chatcompletion_pro?GroupId={self.group_id}")
        # tokens_to_generate/bot_setting/reply_constraints can be adjusted as needed
        self.request_body = {
            "model": self.args["model"],
            "tokens_to_generate": 2048,
            "reply_constraints": {"sender_type": "BOT", "sender_name": "MM智能助理"},
            "messages": [],
            "bot_setting": [
                {
                    "bot_name": "MM智能助理",
                    "content": "MM智能助理是一款由MiniMax自研的,没有调用其他产品的接口的大型语言模型。MiniMax是一家中国科技公司,一直致力于进行大模型相关的研究。",
                }
            ],
        }
        self.sessions = SessionManager(MinimaxSession, model=const.MiniMax)

    def reply(self, query, context: Context = None) -> Reply:
        # acquire reply content
        logger.info("[Minimax_AI] query={}".format(query))
        if context.type == ContextType.TEXT:
            session_id = context["session_id"]
            reply = None
            clear_memory_commands = conf().get("clear_memory_commands", ["#清除记忆"])
            if query in clear_memory_commands:
                self.sessions.clear_session(session_id)
                reply = Reply(ReplyType.INFO, "记忆已清除")
            elif query == "#清除所有":
                self.sessions.clear_all_session()
                reply = Reply(ReplyType.INFO, "所有人记忆已清除")
            elif query == "#更新配置":
                load_config()
                reply = Reply(ReplyType.INFO, "配置已更新")
            if reply:
                return reply
            session = self.sessions.session_query(query, session_id)
            logger.debug("[Minimax_AI] session query={}".format(session))

            model = context.get("Minimax_model")
            new_args = self.args.copy()
            if model:
                new_args["model"] = model
            # if context.get('stream'):
            #     # reply in stream
            #     return self.reply_text_stream(query, new_query, session_id)

            reply_content = self.reply_text(session, args=new_args)
            logger.debug(
                "[Minimax_AI] new_query={}, session_id={}, reply_cont={}, completion_tokens={}".format(
                    session.messages,
                    session_id,
                    reply_content["content"],
                    reply_content["completion_tokens"],
                )
            )
            if reply_content["completion_tokens"] == 0 and len(reply_content["content"]) > 0:
                reply = Reply(ReplyType.ERROR, reply_content["content"])
            elif reply_content["completion_tokens"] > 0:
                self.sessions.session_reply(reply_content["content"], session_id, reply_content["total_tokens"])
                reply = Reply(ReplyType.TEXT, reply_content["content"])
            else:
                reply = Reply(ReplyType.ERROR, reply_content["content"])
                logger.debug("[Minimax_AI] reply {} used 0 tokens.".format(reply_content))
            return reply
        else:
            reply = Reply(ReplyType.ERROR, "Bot不支持处理{}类型的消息".format(context.type))
            return reply

    def reply_text(self, session: MinimaxSession, args=None, retry_count=0) -> dict:
        """
        call the MiniMax chat completion API to get the answer
        :param session: a conversation session
        :param args: request arguments (model, temperature, top_p)
        :param retry_count: retry count
        :return: {}
        """
        try:
            headers = {"Content-Type": "application/json", "Authorization": "Bearer " + self.api_key}
            self.request_body["messages"].extend(session.messages)
            logger.info("[Minimax_AI] request_body={}".format(self.request_body))
            # logger.info("[Minimax_AI] reply={}, total_tokens={}".format(response.choices[0]['message']['content'], response["usage"]["total_tokens"]))
            res = requests.post(self.base_url, headers=headers, json=self.request_body)

            # self.request_body["messages"].extend(response.json()["choices"][0]["messages"])
            if res.status_code == 200:
                response = res.json()
                return {
                    "total_tokens": response["usage"]["total_tokens"],
                    "completion_tokens": response["usage"]["total_tokens"],
                    "content": response["reply"],
                }
            else:
                response = res.json()
                error = response.get("error")
                logger.error(f"[Minimax_AI] chat failed, status_code={res.status_code}, " f"msg={error.get('message')}, type={error.get('type')}")

                result = {"completion_tokens": 0, "content": "提问太快啦,请休息一下再问我吧"}
                need_retry = False
                if res.status_code >= 500:
                    # server error, need retry
                    logger.warn(f"[Minimax_AI] do retry, times={retry_count}")
                    need_retry = retry_count < 2
                elif res.status_code == 401:
                    result["content"] = "授权失败,请检查API Key是否正确"
                elif res.status_code == 429:
                    result["content"] = "请求过于频繁,请稍后再试"
                    need_retry = retry_count < 2
                else:
                    need_retry = False

                if need_retry:
                    time.sleep(3)
                    return self.reply_text(session, args, retry_count + 1)
                else:
                    return result
        except Exception as e:
            logger.exception(e)
            need_retry = retry_count < 2
            result = {"completion_tokens": 0, "content": "我现在有点累了,等会再来吧"}
            if need_retry:
                return self.reply_text(session, args, retry_count + 1)
            else:
                return result
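For orientation, the success path of `reply_text()` above only reads two fields of the chatcompletion_pro response. A sketch of the shape it assumes (reconstructed solely from the fields accessed in the code; the real API returns additional fields not shown here):

```python
# Assumed, abridged response shape; values are illustrative only.
example_response = {
    "reply": "你好,有什么可以帮你?",      # used as the reply content
    "usage": {"total_tokens": 42},           # used for both total and completion tokens
}
```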
@@ -0,0 +1,72 @@
from bot.session_manager import Session
from common.log import logger

"""
e.g.
[
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Who won the world series in 2020?"},
    {"role": "assistant", "content": "The Los Angeles Dodgers won the World Series in 2020."},
    {"role": "user", "content": "Where was it played?"}
]
"""


class MinimaxSession(Session):
    def __init__(self, session_id, system_prompt=None, model="minimax"):
        super().__init__(session_id, system_prompt)
        self.model = model
        # self.reset()

    def add_query(self, query):
        user_item = {"sender_type": "USER", "sender_name": self.session_id, "text": query}
        self.messages.append(user_item)

    def add_reply(self, reply):
        assistant_item = {"sender_type": "BOT", "sender_name": "MM智能助理", "text": reply}
        self.messages.append(assistant_item)

    def discard_exceeding(self, max_tokens, cur_tokens=None):
        precise = True
        try:
            cur_tokens = self.calc_tokens()
        except Exception as e:
            precise = False
            if cur_tokens is None:
                raise e
            logger.debug("Exception when counting tokens precisely for query: {}".format(e))
        while cur_tokens > max_tokens:
            if len(self.messages) > 2:
                self.messages.pop(1)
            elif len(self.messages) == 2 and self.messages[1]["sender_type"] == "BOT":
                self.messages.pop(1)
                if precise:
                    cur_tokens = self.calc_tokens()
                else:
                    cur_tokens = cur_tokens - max_tokens
                break
            elif len(self.messages) == 2 and self.messages[1]["sender_type"] == "USER":
                logger.warn("user message exceed max_tokens. total_tokens={}".format(cur_tokens))
                break
            else:
                logger.debug("max_tokens={}, total_tokens={}, len(messages)={}".format(max_tokens, cur_tokens, len(self.messages)))
                break
            if precise:
                cur_tokens = self.calc_tokens()
            else:
                cur_tokens = cur_tokens - max_tokens
        return cur_tokens

    def calc_tokens(self):
        return num_tokens_from_messages(self.messages, self.model)


def num_tokens_from_messages(messages, model):
    """Returns the number of tokens used by a list of messages."""
    # Official token counting rule: "for Chinese text, 1 token usually corresponds to one character; for English text, 1 token usually corresponds to 3-4 letters or one word"
    # For details see: https://help.aliyun.com/document_detail/2586397.html
    # For now the token count is roughly estimated from the string length, which does not affect normal use
    tokens = 0
    for msg in messages:
        tokens += len(msg["text"])
    return tokens
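To make the MiniMax message format concrete, a short sketch (illustrative values) of what `add_query()`/`add_reply()` above accumulate in `session.messages`, which `MinimaxBot.reply_text()` then extends into `request_body["messages"]` before the POST:

```python
# Illustrative only: a two-turn session as built by MinimaxSession above.
session_messages = [
    {"sender_type": "USER", "sender_name": "session-id-123", "text": "你好"},
    {"sender_type": "BOT", "sender_name": "MM智能助理", "text": "你好,有什么可以帮到你?"},
]
```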
+36 -29
@@ -19,38 +19,45 @@ class Bridge(object):
            "translate": conf().get("translate", "baidu"),
        }
        # read the configured model here
        model_type = conf().get("model") or const.GPT35
        if model_type in ["text-davinci-003"]:
            self.btype["chat"] = const.OPEN_AI
        if conf().get("use_azure_chatgpt", False):
            self.btype["chat"] = const.CHATGPTONAZURE
        if model_type in ["wenxin", "wenxin-4"]:
            self.btype["chat"] = const.BAIDU
        if model_type in ["xunfei"]:
            self.btype["chat"] = const.XUNFEI
        if model_type in [const.QWEN]:
            self.btype["chat"] = const.QWEN
        if model_type in [const.QWEN_TURBO, const.QWEN_PLUS, const.QWEN_MAX]:
            self.btype["chat"] = const.QWEN_DASHSCOPE
        if model_type in [const.GEMINI]:
            self.btype["chat"] = const.GEMINI
        if model_type in [const.ZHIPU_AI]:
            self.btype["chat"] = const.ZHIPU_AI
        if model_type and model_type.startswith("claude-3"):
            self.btype["chat"] = const.CLAUDEAPI
        bot_type = conf().get("bot_type")
        if bot_type:
            self.btype["chat"] = bot_type
        else:
            model_type = conf().get("model") or const.GPT35
            if model_type in ["text-davinci-003"]:
                self.btype["chat"] = const.OPEN_AI
            if conf().get("use_azure_chatgpt", False):
                self.btype["chat"] = const.CHATGPTONAZURE
            if model_type in ["wenxin", "wenxin-4"]:
                self.btype["chat"] = const.BAIDU
            if model_type in ["xunfei"]:
                self.btype["chat"] = const.XUNFEI
            if model_type in [const.QWEN]:
                self.btype["chat"] = const.QWEN
            if model_type in [const.QWEN_TURBO, const.QWEN_PLUS, const.QWEN_MAX]:
                self.btype["chat"] = const.QWEN_DASHSCOPE
            if model_type in [const.GEMINI]:
                self.btype["chat"] = const.GEMINI
            if model_type in [const.ZHIPU_AI]:
                self.btype["chat"] = const.ZHIPU_AI
            if model_type and model_type.startswith("claude-3"):
                self.btype["chat"] = const.CLAUDEAPI

        if model_type in ["claude"]:
            self.btype["chat"] = const.CLAUDEAI
            if model_type in ["claude"]:
                self.btype["chat"] = const.CLAUDEAI

        if model_type in ["moonshot-v1-8k", "moonshot-v1-32k", "moonshot-v1-128k"]:
            self.btype["chat"] = const.MOONSHOT
            if model_type in ["moonshot-v1-8k", "moonshot-v1-32k", "moonshot-v1-128k"]:
                self.btype["chat"] = const.MOONSHOT

        if conf().get("use_linkai") and conf().get("linkai_api_key"):
            self.btype["chat"] = const.LINKAI
            if not conf().get("voice_to_text") or conf().get("voice_to_text") in ["openai"]:
                self.btype["voice_to_text"] = const.LINKAI
            if not conf().get("text_to_voice") or conf().get("text_to_voice") in ["openai", const.TTS_1, const.TTS_1_HD]:
                self.btype["text_to_voice"] = const.LINKAI
            if model_type in ["abab6.5-chat"]:
                self.btype["chat"] = const.MiniMax

            if conf().get("use_linkai") and conf().get("linkai_api_key"):
                self.btype["chat"] = const.LINKAI
                if not conf().get("voice_to_text") or conf().get("voice_to_text") in ["openai"]:
                    self.btype["voice_to_text"] = const.LINKAI
                if not conf().get("text_to_voice") or conf().get("text_to_voice") in ["openai", const.TTS_1, const.TTS_1_HD]:
                    self.btype["text_to_voice"] = const.LINKAI

        self.bots = {}
        self.chat_bots = {}
@@ -78,6 +78,7 @@ class TerminalChannel(ChatChannel):
                prompt = trigger_prefixs[0] + prompt  # add the trigger prefix to messages that did not trigger

            context = self._compose_context(ContextType.TEXT, prompt, msg=TerminalMessage(msg_id, prompt))
            context["isgroup"] = False
            if context:
                self.produce(context)
            else:
+42 -17
@@ -1,44 +1,69 @@
# bot_type
OPEN_AI = "openAI"
CHATGPT = "chatGPT"
BAIDU = "baidu"
BAIDU = "baidu"  # Baidu Wenxin Yiyan (ERNIE Bot) model
XUNFEI = "xunfei"
CHATGPTONAZURE = "chatGPTOnAzure"
LINKAI = "linkai"
CLAUDEAI = "claude"
CLAUDEAPI= "claudeAPI"
QWEN = "qwen"
CLAUDEAI = "claude"  # legacy model accessed via cookie
CLAUDEAPI= "claudeAPI"  # model called through the Claude API
QWEN = "qwen"  # legacy Tongyi model
QWEN_DASHSCOPE = "dashscope"  # new Tongyi SDK and api key

QWEN_DASHSCOPE = "dashscope"
QWEN_TURBO = "qwen-turbo"
QWEN_PLUS = "qwen-plus"
QWEN_MAX = "qwen-max"

GEMINI = "gemini"
ZHIPU_AI = "glm-4"
MOONSHOT = "moonshot"
MiniMax = "minimax"


# model
CLAUDE3 = "claude-3-opus-20240229"
GPT35 = "gpt-3.5-turbo"
GPT4 = "gpt-4"
GPT35_0125 = "gpt-3.5-turbo-0125"
GPT35_1106 = "gpt-3.5-turbo-1106"

GPT_4o = "gpt-4o"
LINKAI_35 = "linkai-3.5"
LINKAI_4_TURBO = "linkai-4-turbo"
LINKAI_4o = "linkai-4o"
GPT4_TURBO_PREVIEW = "gpt-4-turbo-2024-04-09"
GPT4_TURBO = "gpt-4-turbo"
GPT4_TURBO_PREVIEW = "gpt-4-turbo-preview"
GPT4_TURBO_04_09 = "gpt-4-turbo-2024-04-09"
GPT4_TURBO_01_25 = "gpt-4-0125-preview"
GPT4_TURBO_11_06 = "gpt-4-1106-preview"
GPT4_VISION_PREVIEW = "gpt-4-vision-preview"

GPT4 = "gpt-4"
GPT4_32k = "gpt-4-32k"
GPT4_06_13 = "gpt-4-0613"
GPT4_32k_06_13 = "gpt-4-32k-0613"

WHISPER_1 = "whisper-1"
TTS_1 = "tts-1"
TTS_1_HD = "tts-1-hd"

MODEL_LIST = ["gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-4", "wenxin", "wenxin-4", "xunfei", "claude", "claude-3-opus-20240229", "gpt-4-turbo",
              "gpt-4-turbo-preview", "gpt-4-1106-preview", GPT4_TURBO_PREVIEW, GPT4_TURBO_01_25, GPT_4o, QWEN, GEMINI, ZHIPU_AI, MOONSHOT,
              QWEN_TURBO, QWEN_PLUS, QWEN_MAX, LINKAI_35, LINKAI_4_TURBO, LINKAI_4o]
WEN_XIN = "wenxin"
WEN_XIN_4 = "wenxin-4"

QWEN_TURBO = "qwen-turbo"
QWEN_PLUS = "qwen-plus"
QWEN_MAX = "qwen-max"

LINKAI_35 = "linkai-3.5"
LINKAI_4_TURBO = "linkai-4-turbo"
LINKAI_4o = "linkai-4o"


MODEL_LIST = [
    GPT35, GPT35_0125, GPT35_1106, "gpt-3.5-turbo-16k",
    GPT_4o, GPT4_TURBO, GPT4_TURBO_PREVIEW, GPT4_TURBO_01_25, GPT4_TURBO_11_06, GPT4, GPT4_32k, GPT4_06_13, GPT4_32k_06_13,
    WEN_XIN, WEN_XIN_4,
    XUNFEI, GEMINI, ZHIPU_AI, MOONSHOT,
    "claude", "claude-3-haiku", "claude-3-sonnet", "claude-3-opus", "claude-3-opus-20240229",
    "moonshot-v1-8k", "moonshot-v1-32k", "moonshot-v1-128k",
    QWEN, QWEN_TURBO, QWEN_PLUS, QWEN_MAX,
    MiniMax,
    LINKAI_35, LINKAI_4_TURBO, LINKAI_4o
]

# channel
FEISHU = "feishu"
DINGTALK = "dingtalk"
DINGTALK = "dingtalk"
@@ -17,7 +17,8 @@ available_setting = {
    "open_ai_api_base": "https://api.openai.com/v1",
    "proxy": "",  # proxy used for openai
    # chatgpt model; when use_azure_chatgpt is true, this is the model deployment name on Azure
    "model": "gpt-3.5-turbo",  # also supports gpt-4, gpt-4-turbo, wenxin, xunfei, qwen
    "model": "gpt-3.5-turbo",  # supports ChatGPT, Claude, Gemini, Wenxin, Tongyi Qianwen, Kimi, iFlytek Spark, Zhipu, LinkAI and other models; see common/const.py for the exact model names
    "bot_type": "",  # optional; set to "chatGPT" when using a third-party service compatible with the openai format. See the bot_type values listed in common/const.py; if left empty, the bot is chosen from the model name
    "use_azure_chatgpt": False,  # whether to use Azure chatgpt
    "azure_deployment_id": "",  # Azure model deployment name
    "azure_api_version": "",  # Azure api version
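A hypothetical `config.json` fragment showing the new `bot_type` override for an OpenAI-compatible third-party service (the endpoint URL, key, and model name below are placeholders, not values from this diff):

```json
{
  "open_ai_api_key": "sk-xxx",
  "open_ai_api_base": "https://your-compatible-endpoint/v1",
  "model": "your-model-name",
  "bot_type": "chatGPT"
}
```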
@@ -34,7 +35,7 @@ available_setting = {
    "group_name_keyword_white_list": [],  # list of group-name keywords that enable auto-reply
    "group_chat_in_one_session": ["ChatGPT测试群"],  # groups that share conversation context
    "nick_name_black_list": [],  # blacklist of user nicknames
    "group_welcome_msg": "",  # fixed welcome message for new group members; if empty, a randomly styled greeting is used
    "group_welcome_msg": "",  # fixed welcome message for new group members; if empty, a randomly styled greeting is used
    "trigger_by_self": False,  # whether the bot is allowed to trigger itself
    "text_to_image": "dall-e-2",  # image generation model, dall-e-2 or dall-e-3
    # Azure OpenAI dall-e-3 configuration
@@ -48,7 +49,7 @@ available_setting = {
    "image_create_prefix": ["画", "看", "找"],  # prefixes that trigger image generation
    "concurrency_in_session": 1,  # max number of messages being processed in one session; values above 1 may reorder replies
    "image_create_size": "256x256",  # image size; 256x256, 512x512 or 1024x1024 (dall-e-3 defaults to 1024x1024)
    "group_chat_exit_group": False,
    "group_chat_exit_group": False,
    # chatgpt session parameters
    "expires_in_seconds": 3600,  # expiry time of an idle session
    # persona description
@@ -76,14 +77,14 @@ available_setting = {
    "claude_api_cookie": "",
    "claude_uuid": "",
    # claude api key
    "claude_api_key":"",
    "claude_api_key": "",
    # Tongyi Qianwen API; see https://help.aliyun.com/document_detail/2587494.html for how to obtain it
    "qwen_access_key_id": "",
    "qwen_access_key_secret": "",
    "qwen_agent_key": "",
    "qwen_app_id": "",
    "qwen_node_id": "",  # id used by the workflow-orchestration model; if qwen_node_id is not used, keep this an empty string
    # Aliyun Lingji (DashScope) model api key
    # Aliyun Lingji (DashScope, new Tongyi SDK) model api key
    "dashscope_api_key": "",
    # Google Gemini Api Key
    "gemini_api_key": "",
@@ -108,8 +109,8 @@ available_setting = {
    "azure_voice_api_key": "",
    "azure_voice_region": "japaneast",
    # elevenlabs voice api configuration
    "xi_api_key": "",  # see https://docs.elevenlabs.io/api-reference/quick-start/authentication for how to obtain the api key
    "xi_voice_id": "",  # ElevenLabs provides 9 British/American English voice ids: Adam/Antoni/Arnold/Bella/Domi/Elli/Josh/Rachel/Sam
    "xi_api_key": "",  # see https://docs.elevenlabs.io/api-reference/quick-start/authentication for how to obtain the api key
    "xi_voice_id": "",  # ElevenLabs provides 9 British/American English voice ids: Adam/Antoni/Arnold/Bella/Domi/Elli/Josh/Rachel/Sam
    # service-hours limit; currently supported for itchat
    "chat_time_module": False,  # whether to enable the service-hours limit
    "chat_start_time": "00:00",  # service start time
@@ -137,14 +138,12 @@ available_setting = {
    "wechatcomapp_secret": "",  # secret of the WeCom app
    "wechatcomapp_agent_id": "",  # agent_id of the WeCom app
    "wechatcomapp_aes_key": "",  # aes_key of the WeCom app

    # Feishu configuration
    "feishu_port": 80,  # port the Feishu bot listens on
    "feishu_app_id": "",  # APP Id of the Feishu bot application
    "feishu_app_secret": "",  # APP secret of the Feishu bot
    "feishu_token": "",  # Feishu verification token
    "feishu_bot_name": "",  # name of the Feishu bot

    # DingTalk configuration
    "dingtalk_client_id": "",  # DingTalk bot Client ID
    "dingtalk_client_secret": "",  # DingTalk bot Client Secret
@@ -161,18 +160,21 @@ available_setting = {
    "plugin_trigger_prefix": "$",  # prefix for chat commands provided by plugins; avoid clashing with the admin command prefix "#"
    # whether to use the global plugin configuration
    "use_global_plugin_config": False,
    "max_media_send_count": 3,  # maximum number of media resources sent at once
    "max_media_send_count": 3,  # maximum number of media resources sent at once
    "media_send_interval": 1,  # interval between sending images, in seconds
    # Zhipu AI platform configuration
    "zhipu_ai_api_key": "",
    "zhipu_ai_api_base": "https://open.bigmodel.cn/api/paas/v4",
    "moonshot_api_key": "",
    "moonshot_base_url":"https://api.moonshot.cn/v1/chat/completions",
    "moonshot_base_url": "https://api.moonshot.cn/v1/chat/completions",
    # LinkAI platform configuration
    "use_linkai": False,
    "linkai_api_key": "",
    "linkai_app_code": "",
    "linkai_api_base": "https://api.link-ai.tech",  # linkAI service address
    "Minimax_api_key": "",
    "Minimax_group_id": "",
    "Minimax_base_url": "",
}
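Putting the new keys to use: an illustrative `config.json` fragment that routes chat to MiniMax (placeholder credentials; `"abab6.5-chat"` is the model name the bridge.py change above maps to the minimax bot type, and `Minimax_base_url` can be omitted so the bot builds the default chatcompletion_pro URL from the group id):

```json
{
  "model": "abab6.5-chat",
  "Minimax_api_key": "your-minimax-api-key",
  "Minimax_group_id": "your-group-id"
}
```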
@@ -346,6 +348,4 @@ def pconf(plugin_name: str) -> dict:

# global configuration, used to hold globally effective state
global_config = {
    "admin_users": []
}
global_config = {"admin_users": []}
@@ -40,5 +40,22 @@
    "max_file_size": 5000,
    "type": ["FILE", "SHARING"]
    }
  },
  "hello": {
    "group_welc_fixed_msg": {
      "群聊1": "群聊1的固定欢迎语",
      "群聊2": "群聊2的固定欢迎语"
    },
    "group_welc_prompt": "请你随机使用一种风格说一句问候语来欢迎新用户\"{nickname}\"加入群聊。",
    "group_exit_prompt": "请你随机使用一种风格跟其他群用户说他违反规则\"{nickname}\"退出群聊。",
    "patpat_prompt": "请你随机使用一种风格介绍你自己,并告诉用户输入#help可以查看帮助信息。",
    "use_character_desc": false
  },
  "Apilot": {
    "alapi_token": "xxx",
    "morning_news_text_enabled": false
  }
}