Mirror of https://github.com/zhayujie/chatgpt-on-wechat.git (synced 2026-03-19 21:38:18 +08:00)

Commit: formatting code
@@ -1,6 +1,7 @@
# encoding:utf-8

import requests

from bot.bot import Bot
from bridge.reply import Reply, ReplyType
@@ -9,20 +10,35 @@ from bridge.reply import Reply, ReplyType
class BaiduUnitBot(Bot):
    def reply(self, query, context=None):
        token = self.get_token()
        url = 'https://aip.baidubce.com/rpc/2.0/unit/service/v3/chat?access_token=' + token
        post_data = "{\"version\":\"3.0\",\"service_id\":\"S73177\",\"session_id\":\"\",\"log_id\":\"7758521\",\"skill_ids\":[\"1221886\"],\"request\":{\"terminal_id\":\"88888\",\"query\":\"" + query + "\", \"hyper_params\": {\"chat_custom_bot_profile\": 1}}}"
        url = (
            "https://aip.baidubce.com/rpc/2.0/unit/service/v3/chat?access_token="
            + token
        )
        post_data = (
            '{"version":"3.0","service_id":"S73177","session_id":"","log_id":"7758521","skill_ids":["1221886"],"request":{"terminal_id":"88888","query":"'
            + query
            + '", "hyper_params": {"chat_custom_bot_profile": 1}}}'
        )
        print(post_data)
        headers = {'content-type': 'application/x-www-form-urlencoded'}
        headers = {"content-type": "application/x-www-form-urlencoded"}
        response = requests.post(url, data=post_data.encode(), headers=headers)
        if response:
            reply = Reply(ReplyType.TEXT, response.json()['result']['context']['SYS_PRESUMED_HIST'][1])
            reply = Reply(
                ReplyType.TEXT,
                response.json()["result"]["context"]["SYS_PRESUMED_HIST"][1],
            )
            return reply

    def get_token(self):
        access_key = 'YOUR_ACCESS_KEY'
        secret_key = 'YOUR_SECRET_KEY'
        host = 'https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id=' + access_key + '&client_secret=' + secret_key
        access_key = "YOUR_ACCESS_KEY"
        secret_key = "YOUR_SECRET_KEY"
        host = (
            "https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id="
            + access_key
            + "&client_secret="
            + secret_key
        )
        response = requests.get(host)
        if response:
            print(response.json())
            return response.json()['access_token']
            return response.json()["access_token"]
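The two hunks above reformat the Baidu UNIT bot (the factory later in this diff imports it as bot.baidu.baidu_unit_bot). A minimal usage sketch, not part of the commit, assuming real credentials replace the YOUR_ACCESS_KEY / YOUR_SECRET_KEY placeholders hard-coded in get_token():

# Hypothetical usage sketch of the class shown above.
from bot.baidu.baidu_unit_bot import BaiduUnitBot

bot = BaiduUnitBot()
reply = bot.reply("你好")  # Reply(ReplyType.TEXT, ...) when the UNIT call succeeds, else None
if reply:
    print(reply.content)  # assumes Reply exposes .type / .content as elsewhere in this repo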
@@ -8,7 +8,7 @@ from bridge.reply import Reply
class Bot(object):
    def reply(self, query, context : Context =None) -> Reply:
    def reply(self, query, context: Context = None) -> Reply:
        """
        bot auto-reply content
        :param req: received message
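The hunk above only fixes spacing in the abstract Bot.reply signature; every concrete bot touched by this commit implements that same interface. A minimal sketch, purely for orientation and not part of the commit:

# Hypothetical example of the Bot interface shown above.
from bot.bot import Bot
from bridge.reply import Reply, ReplyType


class EchoBot(Bot):
    def reply(self, query, context=None):
        # simply echo the received message back as a text reply
        return Reply(ReplyType.TEXT, query)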
@@ -13,20 +13,24 @@ def create_bot(bot_type):
    if bot_type == const.BAIDU:
        # Baidu Unit对话接口
        from bot.baidu.baidu_unit_bot import BaiduUnitBot

        return BaiduUnitBot()

    elif bot_type == const.CHATGPT:
        # ChatGPT 网页端web接口
        from bot.chatgpt.chat_gpt_bot import ChatGPTBot

        return ChatGPTBot()

    elif bot_type == const.OPEN_AI:
        # OpenAI 官方对话模型API
        from bot.openai.open_ai_bot import OpenAIBot

        return OpenAIBot()

    elif bot_type == const.CHATGPTONAZURE:
        # Azure chatgpt service https://azure.microsoft.com/en-in/products/cognitive-services/openai-service/
        from bot.chatgpt.chat_gpt_bot import AzureChatGPTBot

        return AzureChatGPTBot()
    raise RuntimeError
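The factory above is how the rest of the project obtains a bot instance. A usage sketch, assuming the constants live in common/const.py as the const.* references suggest; not part of this commit:

# Hypothetical usage sketch of create_bot() shown above.
from bot.bot_factory import create_bot
from common import const

bot = create_bot(const.CHATGPT)  # returns a ChatGPTBot per the branch above
# bot.reply(...) then needs a bridge Context carrying type and session_id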
@@ -1,42 +1,53 @@
# encoding:utf-8

import time

import openai
import openai.error

from bot.bot import Bot
from bot.chatgpt.chat_gpt_session import ChatGPTSession
from bot.openai.open_ai_image import OpenAIImage
from bot.session_manager import SessionManager
from bridge.context import ContextType
from bridge.reply import Reply, ReplyType
from config import conf, load_config
from common.log import logger
from common.token_bucket import TokenBucket
import openai
import openai.error
import time
from config import conf, load_config


# OpenAI对话模型API (可用)
class ChatGPTBot(Bot,OpenAIImage):
class ChatGPTBot(Bot, OpenAIImage):
    def __init__(self):
        super().__init__()
        # set the default api_key
        openai.api_key = conf().get('open_ai_api_key')
        if conf().get('open_ai_api_base'):
            openai.api_base = conf().get('open_ai_api_base')
        proxy = conf().get('proxy')
        openai.api_key = conf().get("open_ai_api_key")
        if conf().get("open_ai_api_base"):
            openai.api_base = conf().get("open_ai_api_base")
        proxy = conf().get("proxy")
        if proxy:
            openai.proxy = proxy
        if conf().get('rate_limit_chatgpt'):
            self.tb4chatgpt = TokenBucket(conf().get('rate_limit_chatgpt', 20))

        self.sessions = SessionManager(ChatGPTSession, model= conf().get("model") or "gpt-3.5-turbo")
        self.args ={
        if conf().get("rate_limit_chatgpt"):
            self.tb4chatgpt = TokenBucket(conf().get("rate_limit_chatgpt", 20))

        self.sessions = SessionManager(
            ChatGPTSession, model=conf().get("model") or "gpt-3.5-turbo"
        )
        self.args = {
            "model": conf().get("model") or "gpt-3.5-turbo", # 对话模型的名称
            "temperature":conf().get('temperature', 0.9), # 值在[0,1]之间,越大表示回复越具有不确定性
            "temperature": conf().get("temperature", 0.9), # 值在[0,1]之间,越大表示回复越具有不确定性
            # "max_tokens":4096, # 回复最大的字符数
            "top_p":1,
            "frequency_penalty":conf().get('frequency_penalty', 0.0), # [-2,2]之间,该值越大则更倾向于产生不同的内容
            "presence_penalty":conf().get('presence_penalty', 0.0), # [-2,2]之间,该值越大则更倾向于产生不同的内容
            "request_timeout": conf().get('request_timeout', None), # 请求超时时间,openai接口默认设置为600,对于难问题一般需要较长时间
            "timeout": conf().get('request_timeout', None), #重试超时时间,在这个时间内,将会自动重试
            "top_p": 1,
            "frequency_penalty": conf().get(
                "frequency_penalty", 0.0
            ), # [-2,2]之间,该值越大则更倾向于产生不同的内容
            "presence_penalty": conf().get(
                "presence_penalty", 0.0
            ), # [-2,2]之间,该值越大则更倾向于产生不同的内容
            "request_timeout": conf().get(
                "request_timeout", None
            ), # 请求超时时间,openai接口默认设置为600,对于难问题一般需要较长时间
            "timeout": conf().get("request_timeout", None), # 重试超时时间,在这个时间内,将会自动重试
        }

    def reply(self, query, context=None):
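The constructor above gates requests with common.token_bucket.TokenBucket when rate_limit_chatgpt is configured. Only the constructor argument and get_token() call are visible in this diff, so the sketch below assumes that is the whole contract:

# Hypothetical sketch of the rate-limit gate used above; the unit of the bucket
# size (requests per minute) is an assumption, only TokenBucket(n) and
# get_token() appear in this diff.
from common.token_bucket import TokenBucket

bucket = TokenBucket(20)
if not bucket.get_token():
    raise RuntimeError("rate limit exceeded")  # mirrors the RateLimitError raised below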
@@ -44,39 +55,50 @@ class ChatGPTBot(Bot,OpenAIImage):
        if context.type == ContextType.TEXT:
            logger.info("[CHATGPT] query={}".format(query))

            session_id = context['session_id']
            session_id = context["session_id"]
            reply = None
            clear_memory_commands = conf().get('clear_memory_commands', ['#清除记忆'])
            clear_memory_commands = conf().get("clear_memory_commands", ["#清除记忆"])
            if query in clear_memory_commands:
                self.sessions.clear_session(session_id)
                reply = Reply(ReplyType.INFO, '记忆已清除')
            elif query == '#清除所有':
                reply = Reply(ReplyType.INFO, "记忆已清除")
            elif query == "#清除所有":
                self.sessions.clear_all_session()
                reply = Reply(ReplyType.INFO, '所有人记忆已清除')
            elif query == '#更新配置':
                reply = Reply(ReplyType.INFO, "所有人记忆已清除")
            elif query == "#更新配置":
                load_config()
                reply = Reply(ReplyType.INFO, '配置已更新')
                reply = Reply(ReplyType.INFO, "配置已更新")
            if reply:
                return reply
            session = self.sessions.session_query(query, session_id)
            logger.debug("[CHATGPT] session query={}".format(session.messages))

            api_key = context.get('openai_api_key')
            api_key = context.get("openai_api_key")

            # if context.get('stream'):
            #     # reply in stream
            #     return self.reply_text_stream(query, new_query, session_id)

            reply_content = self.reply_text(session, api_key)
            logger.debug("[CHATGPT] new_query={}, session_id={}, reply_cont={}, completion_tokens={}".format(session.messages, session_id, reply_content["content"], reply_content["completion_tokens"]))
            if reply_content['completion_tokens'] == 0 and len(reply_content['content']) > 0:
                reply = Reply(ReplyType.ERROR, reply_content['content'])
            logger.debug(
                "[CHATGPT] new_query={}, session_id={}, reply_cont={}, completion_tokens={}".format(
                    session.messages,
                    session_id,
                    reply_content["content"],
                    reply_content["completion_tokens"],
                )
            )
            if (
                reply_content["completion_tokens"] == 0
                and len(reply_content["content"]) > 0
            ):
                reply = Reply(ReplyType.ERROR, reply_content["content"])
            elif reply_content["completion_tokens"] > 0:
                self.sessions.session_reply(reply_content["content"], session_id, reply_content["total_tokens"])
                self.sessions.session_reply(
                    reply_content["content"], session_id, reply_content["total_tokens"]
                )
                reply = Reply(ReplyType.TEXT, reply_content["content"])
            else:
                reply = Reply(ReplyType.ERROR, reply_content['content'])
                reply = Reply(ReplyType.ERROR, reply_content["content"])
                logger.debug("[CHATGPT] reply {} used 0 tokens.".format(reply_content))
            return reply
@@ -89,53 +111,55 @@ class ChatGPTBot(Bot,OpenAIImage):
                reply = Reply(ReplyType.ERROR, retstring)
            return reply
        else:
            reply = Reply(ReplyType.ERROR, 'Bot不支持处理{}类型的消息'.format(context.type))
            reply = Reply(ReplyType.ERROR, "Bot不支持处理{}类型的消息".format(context.type))
            return reply

    def reply_text(self, session:ChatGPTSession, api_key=None, retry_count=0) -> dict:
        '''
    def reply_text(self, session: ChatGPTSession, api_key=None, retry_count=0) -> dict:
        """
        call openai's ChatCompletion to get the answer
        :param session: a conversation session
        :param session_id: session id
        :param retry_count: retry count
        :return: {}
        '''
        """
        try:
            if conf().get('rate_limit_chatgpt') and not self.tb4chatgpt.get_token():
            if conf().get("rate_limit_chatgpt") and not self.tb4chatgpt.get_token():
                raise openai.error.RateLimitError("RateLimitError: rate limit exceeded")
            # if api_key == None, the default openai.api_key will be used
            response = openai.ChatCompletion.create(
                api_key=api_key, messages=session.messages, **self.args
            )
            # logger.info("[ChatGPT] reply={}, total_tokens={}".format(response.choices[0]['message']['content'], response["usage"]["total_tokens"]))
            return {"total_tokens": response["usage"]["total_tokens"],
                    "completion_tokens": response["usage"]["completion_tokens"],
                    "content": response.choices[0]['message']['content']}
            return {
                "total_tokens": response["usage"]["total_tokens"],
                "completion_tokens": response["usage"]["completion_tokens"],
                "content": response.choices[0]["message"]["content"],
            }
        except Exception as e:
            need_retry = retry_count < 2
            result = {"completion_tokens": 0, "content": "我现在有点累了,等会再来吧"}
            if isinstance(e, openai.error.RateLimitError):
                logger.warn("[CHATGPT] RateLimitError: {}".format(e))
                result['content'] = "提问太快啦,请休息一下再问我吧"
                result["content"] = "提问太快啦,请休息一下再问我吧"
                if need_retry:
                    time.sleep(5)
            elif isinstance(e, openai.error.Timeout):
                logger.warn("[CHATGPT] Timeout: {}".format(e))
                result['content'] = "我没有收到你的消息"
                result["content"] = "我没有收到你的消息"
                if need_retry:
                    time.sleep(5)
            elif isinstance(e, openai.error.APIConnectionError):
                logger.warn("[CHATGPT] APIConnectionError: {}".format(e))
                need_retry = False
                result['content'] = "我连接不到你的网络"
                result["content"] = "我连接不到你的网络"
            else:
                logger.warn("[CHATGPT] Exception: {}".format(e))
                need_retry = False
                self.sessions.clear_session(session.session_id)

            if need_retry:
                logger.warn("[CHATGPT] 第{}次重试".format(retry_count+1))
                return self.reply_text(session, api_key, retry_count+1)
                logger.warn("[CHATGPT] 第{}次重试".format(retry_count + 1))
                return self.reply_text(session, api_key, retry_count + 1)
            else:
                return result
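reply_text above ultimately issues one ChatCompletion call with self.args unpacked into it. For reference, a standalone sketch of that call against the pre-1.0 openai SDK; the key and messages are placeholders, not values from this commit:

# Standalone sketch of the call made in reply_text above (openai-python < 1.0).
import openai

openai.api_key = "sk-..."  # placeholder
response = openai.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "hello"}],
    temperature=0.9,
    top_p=1,
)
print(response.choices[0]["message"]["content"])
print(response["usage"]["total_tokens"], response["usage"]["completion_tokens"])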
@@ -145,4 +169,4 @@ class AzureChatGPTBot(ChatGPTBot):
        super().__init__()
        openai.api_type = "azure"
        openai.api_version = "2023-03-15-preview"
        self.args["deployment_id"] = conf().get("azure_deployment_id")
        self.args["deployment_id"] = conf().get("azure_deployment_id")
@@ -1,20 +1,23 @@
from bot.session_manager import Session
from common.log import logger
'''

"""
e.g. [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Who won the world series in 2020?"},
    {"role": "assistant", "content": "The Los Angeles Dodgers won the World Series in 2020."},
    {"role": "user", "content": "Where was it played?"}
]
'''
"""


class ChatGPTSession(Session):
    def __init__(self, session_id, system_prompt=None, model= "gpt-3.5-turbo"):
    def __init__(self, session_id, system_prompt=None, model="gpt-3.5-turbo"):
        super().__init__(session_id, system_prompt)
        self.model = model
        self.reset()

    def discard_exceeding(self, max_tokens, cur_tokens= None):

    def discard_exceeding(self, max_tokens, cur_tokens=None):
        precise = True
        try:
            cur_tokens = self.calc_tokens()
@@ -22,7 +25,9 @@ class ChatGPTSession(Session):
            precise = False
            if cur_tokens is None:
                raise e
            logger.debug("Exception when counting tokens precisely for query: {}".format(e))
            logger.debug(
                "Exception when counting tokens precisely for query: {}".format(e)
            )
        while cur_tokens > max_tokens:
            if len(self.messages) > 2:
                self.messages.pop(1)
@@ -34,25 +39,32 @@ class ChatGPTSession(Session):
                    cur_tokens = cur_tokens - max_tokens
                break
            elif len(self.messages) == 2 and self.messages[1]["role"] == "user":
                logger.warn("user message exceed max_tokens. total_tokens={}".format(cur_tokens))
                logger.warn(
                    "user message exceed max_tokens. total_tokens={}".format(cur_tokens)
                )
                break
            else:
                logger.debug("max_tokens={}, total_tokens={}, len(messages)={}".format(max_tokens, cur_tokens, len(self.messages)))
                logger.debug(
                    "max_tokens={}, total_tokens={}, len(messages)={}".format(
                        max_tokens, cur_tokens, len(self.messages)
                    )
                )
                break
            if precise:
                cur_tokens = self.calc_tokens()
            else:
                cur_tokens = cur_tokens - max_tokens
        return cur_tokens

    def calc_tokens(self):
        return num_tokens_from_messages(self.messages, self.model)


# refer to https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
def num_tokens_from_messages(messages, model):
    """Returns the number of tokens used by a list of messages."""
    import tiktoken

    try:
        encoding = tiktoken.encoding_for_model(model)
    except KeyError:
@@ -63,13 +75,17 @@ def num_tokens_from_messages(messages, model):
    elif model == "gpt-4":
        return num_tokens_from_messages(messages, model="gpt-4-0314")
    elif model == "gpt-3.5-turbo-0301":
        tokens_per_message = 4 # every message follows <|start|>{role/name}\n{content}<|end|>\n
        tokens_per_message = (
            4 # every message follows <|start|>{role/name}\n{content}<|end|>\n
        )
        tokens_per_name = -1 # if there's a name, the role is omitted
    elif model == "gpt-4-0314":
        tokens_per_message = 3
        tokens_per_name = 1
    else:
        logger.warn(f"num_tokens_from_messages() is not implemented for model {model}. Returning num tokens assuming gpt-3.5-turbo-0301.")
        logger.warn(
            f"num_tokens_from_messages() is not implemented for model {model}. Returning num tokens assuming gpt-3.5-turbo-0301."
        )
        return num_tokens_from_messages(messages, model="gpt-3.5-turbo-0301")
    num_tokens = 0
    for message in messages:
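A quick way to exercise the counter above (tiktoken must be installed; the import path is inferred from the ChatGPTSession import seen earlier in this commit, and exact counts depend on the tiktoken version):

# Usage sketch for num_tokens_from_messages() as defined above; not part of the commit.
from bot.chatgpt.chat_gpt_session import num_tokens_from_messages

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Who won the world series in 2020?"},
]
print(num_tokens_from_messages(messages, model="gpt-3.5-turbo"))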
@@ -1,41 +1,52 @@
# encoding:utf-8

import time

import openai
import openai.error

from bot.bot import Bot
from bot.openai.open_ai_image import OpenAIImage
from bot.openai.open_ai_session import OpenAISession
from bot.session_manager import SessionManager
from bridge.context import ContextType
from bridge.reply import Reply, ReplyType
from config import conf
from common.log import logger
import openai
import openai.error
import time
from config import conf

user_session = dict()


# OpenAI对话模型API (可用)
class OpenAIBot(Bot, OpenAIImage):
    def __init__(self):
        super().__init__()
        openai.api_key = conf().get('open_ai_api_key')
        if conf().get('open_ai_api_base'):
            openai.api_base = conf().get('open_ai_api_base')
        proxy = conf().get('proxy')
        openai.api_key = conf().get("open_ai_api_key")
        if conf().get("open_ai_api_base"):
            openai.api_base = conf().get("open_ai_api_base")
        proxy = conf().get("proxy")
        if proxy:
            openai.proxy = proxy

        self.sessions = SessionManager(OpenAISession, model= conf().get("model") or "text-davinci-003")
        self.sessions = SessionManager(
            OpenAISession, model=conf().get("model") or "text-davinci-003"
        )
        self.args = {
            "model": conf().get("model") or "text-davinci-003", # 对话模型的名称
            "temperature":conf().get('temperature', 0.9), # 值在[0,1]之间,越大表示回复越具有不确定性
            "max_tokens":1200, # 回复最大的字符数
            "top_p":1,
            "frequency_penalty":conf().get('frequency_penalty', 0.0), # [-2,2]之间,该值越大则更倾向于产生不同的内容
            "presence_penalty":conf().get('presence_penalty', 0.0), # [-2,2]之间,该值越大则更倾向于产生不同的内容
            "request_timeout": conf().get('request_timeout', None), # 请求超时时间,openai接口默认设置为600,对于难问题一般需要较长时间
            "timeout": conf().get('request_timeout', None), #重试超时时间,在这个时间内,将会自动重试
            "stop":["\n\n\n"]
            "temperature": conf().get("temperature", 0.9), # 值在[0,1]之间,越大表示回复越具有不确定性
            "max_tokens": 1200, # 回复最大的字符数
            "top_p": 1,
            "frequency_penalty": conf().get(
                "frequency_penalty", 0.0
            ), # [-2,2]之间,该值越大则更倾向于产生不同的内容
            "presence_penalty": conf().get(
                "presence_penalty", 0.0
            ), # [-2,2]之间,该值越大则更倾向于产生不同的内容
            "request_timeout": conf().get(
                "request_timeout", None
            ), # 请求超时时间,openai接口默认设置为600,对于难问题一般需要较长时间
            "timeout": conf().get("request_timeout", None), # 重试超时时间,在这个时间内,将会自动重试
            "stop": ["\n\n\n"],
        }

    def reply(self, query, context=None):
@@ -43,24 +54,34 @@ class OpenAIBot(Bot, OpenAIImage):
        if context and context.type:
            if context.type == ContextType.TEXT:
                logger.info("[OPEN_AI] query={}".format(query))
                session_id = context['session_id']
                session_id = context["session_id"]
                reply = None
                if query == '#清除记忆':
                if query == "#清除记忆":
                    self.sessions.clear_session(session_id)
                    reply = Reply(ReplyType.INFO, '记忆已清除')
                elif query == '#清除所有':
                    reply = Reply(ReplyType.INFO, "记忆已清除")
                elif query == "#清除所有":
                    self.sessions.clear_all_session()
                    reply = Reply(ReplyType.INFO, '所有人记忆已清除')
                    reply = Reply(ReplyType.INFO, "所有人记忆已清除")
                else:
                    session = self.sessions.session_query(query, session_id)
                    result = self.reply_text(session)
                    total_tokens, completion_tokens, reply_content = result['total_tokens'], result['completion_tokens'], result['content']
                    logger.debug("[OPEN_AI] new_query={}, session_id={}, reply_cont={}, completion_tokens={}".format(str(session), session_id, reply_content, completion_tokens))
                    total_tokens, completion_tokens, reply_content = (
                        result["total_tokens"],
                        result["completion_tokens"],
                        result["content"],
                    )
                    logger.debug(
                        "[OPEN_AI] new_query={}, session_id={}, reply_cont={}, completion_tokens={}".format(
                            str(session), session_id, reply_content, completion_tokens
                        )
                    )

                    if total_tokens == 0 :
                    if total_tokens == 0:
                        reply = Reply(ReplyType.ERROR, reply_content)
                    else:
                        self.sessions.session_reply(reply_content, session_id, total_tokens)
                        self.sessions.session_reply(
                            reply_content, session_id, total_tokens
                        )
                        reply = Reply(ReplyType.TEXT, reply_content)
                return reply
            elif context.type == ContextType.IMAGE_CREATE:
@@ -72,42 +93,44 @@ class OpenAIBot(Bot, OpenAIImage):
                    reply = Reply(ReplyType.ERROR, retstring)
                return reply

    def reply_text(self, session:OpenAISession, retry_count=0):
    def reply_text(self, session: OpenAISession, retry_count=0):
        try:
            response = openai.Completion.create(
                prompt=str(session), **self.args
            response = openai.Completion.create(prompt=str(session), **self.args)
            res_content = (
                response.choices[0]["text"].strip().replace("<|endoftext|>", "")
            )
            res_content = response.choices[0]['text'].strip().replace('<|endoftext|>', '')
            total_tokens = response["usage"]["total_tokens"]
            completion_tokens = response["usage"]["completion_tokens"]
            logger.info("[OPEN_AI] reply={}".format(res_content))
            return {"total_tokens": total_tokens,
                    "completion_tokens": completion_tokens,
                    "content": res_content}
            return {
                "total_tokens": total_tokens,
                "completion_tokens": completion_tokens,
                "content": res_content,
            }
        except Exception as e:
            need_retry = retry_count < 2
            result = {"completion_tokens": 0, "content": "我现在有点累了,等会再来吧"}
            if isinstance(e, openai.error.RateLimitError):
                logger.warn("[OPEN_AI] RateLimitError: {}".format(e))
                result['content'] = "提问太快啦,请休息一下再问我吧"
                result["content"] = "提问太快啦,请休息一下再问我吧"
                if need_retry:
                    time.sleep(5)
            elif isinstance(e, openai.error.Timeout):
                logger.warn("[OPEN_AI] Timeout: {}".format(e))
                result['content'] = "我没有收到你的消息"
                result["content"] = "我没有收到你的消息"
                if need_retry:
                    time.sleep(5)
            elif isinstance(e, openai.error.APIConnectionError):
                logger.warn("[OPEN_AI] APIConnectionError: {}".format(e))
                need_retry = False
                result['content'] = "我连接不到你的网络"
                result["content"] = "我连接不到你的网络"
            else:
                logger.warn("[OPEN_AI] Exception: {}".format(e))
                need_retry = False
                self.sessions.clear_session(session.session_id)

            if need_retry:
                logger.warn("[OPEN_AI] 第{}次重试".format(retry_count+1))
                return self.reply_text(session, retry_count+1)
                logger.warn("[OPEN_AI] 第{}次重试".format(retry_count + 1))
                return self.reply_text(session, retry_count + 1)
            else:
                return result
                return result
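OpenAIBot.reply_text above drives the legacy completions endpoint with the prompt built by OpenAISession.__str__ (shown later in this diff). A standalone sketch of that call with placeholder values, not part of the commit:

# Standalone sketch of the call made in OpenAIBot.reply_text above (openai-python < 1.0).
import openai

openai.api_key = "sk-..."  # placeholder
response = openai.Completion.create(
    model="text-davinci-003",
    prompt="Q: hello\nA: ",
    max_tokens=1200,
    stop=["\n\n\n"],
)
print(response.choices[0]["text"].strip().replace("<|endoftext|>", ""))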
@@ -1,38 +1,45 @@
import time

import openai
import openai.error
from common.token_bucket import TokenBucket

from common.log import logger
from common.token_bucket import TokenBucket
from config import conf


# OPENAI提供的画图接口
class OpenAIImage(object):
    def __init__(self):
        openai.api_key = conf().get('open_ai_api_key')
        if conf().get('rate_limit_dalle'):
            self.tb4dalle = TokenBucket(conf().get('rate_limit_dalle', 50))

        openai.api_key = conf().get("open_ai_api_key")
        if conf().get("rate_limit_dalle"):
            self.tb4dalle = TokenBucket(conf().get("rate_limit_dalle", 50))

    def create_img(self, query, retry_count=0):
        try:
            if conf().get('rate_limit_dalle') and not self.tb4dalle.get_token():
            if conf().get("rate_limit_dalle") and not self.tb4dalle.get_token():
                return False, "请求太快了,请休息一下再问我吧"
            logger.info("[OPEN_AI] image_query={}".format(query))
            response = openai.Image.create(
                prompt=query, #图片描述
                n=1, #每次生成图片的数量
                size="256x256" #图片大小,可选有 256x256, 512x512, 1024x1024
                prompt=query, # 图片描述
                n=1, # 每次生成图片的数量
                size="256x256", # 图片大小,可选有 256x256, 512x512, 1024x1024
            )
            image_url = response['data'][0]['url']
            image_url = response["data"][0]["url"]
            logger.info("[OPEN_AI] image_url={}".format(image_url))
            return True, image_url
        except openai.error.RateLimitError as e:
            logger.warn(e)
            if retry_count < 1:
                time.sleep(5)
                logger.warn("[OPEN_AI] ImgCreate RateLimit exceed, 第{}次重试".format(retry_count+1))
                return self.create_img(query, retry_count+1)
                logger.warn(
                    "[OPEN_AI] ImgCreate RateLimit exceed, 第{}次重试".format(
                        retry_count + 1
                    )
                )
                return self.create_img(query, retry_count + 1)
            else:
                return False, "提问太快啦,请休息一下再问我吧"
        except Exception as e:
            logger.exception(e)
            return False, str(e)
            return False, str(e)
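create_img above returns an (ok, payload) pair instead of raising, so callers branch on the flag. A minimal sketch of that calling pattern, not part of the commit; it assumes a configured open_ai_api_key:

# Hypothetical usage sketch of OpenAIImage.create_img shown above.
from bot.openai.open_ai_image import OpenAIImage

ok, result = OpenAIImage().create_img("a watercolor cat")
if ok:
    print("image url:", result)   # result is the generated image URL
else:
    print("failed:", result)      # result is an error / rate-limit message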
@@ -1,32 +1,34 @@
from bot.session_manager import Session
from common.log import logger


class OpenAISession(Session):
    def __init__(self, session_id, system_prompt=None, model= "text-davinci-003"):
    def __init__(self, session_id, system_prompt=None, model="text-davinci-003"):
        super().__init__(session_id, system_prompt)
        self.model = model
        self.reset()

    def __str__(self):
        # 构造对话模型的输入
        '''
        """
        e.g. Q: xxx
        A: xxx
        Q: xxx
        '''
        """
        prompt = ""
        for item in self.messages:
            if item['role'] == 'system':
                prompt += item['content'] + "<|endoftext|>\n\n\n"
            elif item['role'] == 'user':
                prompt += "Q: " + item['content'] + "\n"
            elif item['role'] == 'assistant':
                prompt += "\n\nA: " + item['content'] + "<|endoftext|>\n"
            if item["role"] == "system":
                prompt += item["content"] + "<|endoftext|>\n\n\n"
            elif item["role"] == "user":
                prompt += "Q: " + item["content"] + "\n"
            elif item["role"] == "assistant":
                prompt += "\n\nA: " + item["content"] + "<|endoftext|>\n"

        if len(self.messages) > 0 and self.messages[-1]['role'] == 'user':
        if len(self.messages) > 0 and self.messages[-1]["role"] == "user":
            prompt += "A: "
        return prompt

    def discard_exceeding(self, max_tokens, cur_tokens= None):
    def discard_exceeding(self, max_tokens, cur_tokens=None):
        precise = True
        try:
            cur_tokens = self.calc_tokens()
@@ -34,7 +36,9 @@ class OpenAISession(Session):
            precise = False
            if cur_tokens is None:
                raise e
            logger.debug("Exception when counting tokens precisely for query: {}".format(e))
            logger.debug(
                "Exception when counting tokens precisely for query: {}".format(e)
            )
        while cur_tokens > max_tokens:
            if len(self.messages) > 1:
                self.messages.pop(0)
@@ -46,24 +50,34 @@ class OpenAISession(Session):
                    cur_tokens = len(str(self))
                break
            elif len(self.messages) == 1 and self.messages[0]["role"] == "user":
                logger.warn("user question exceed max_tokens. total_tokens={}".format(cur_tokens))
                logger.warn(
                    "user question exceed max_tokens. total_tokens={}".format(
                        cur_tokens
                    )
                )
                break
            else:
                logger.debug("max_tokens={}, total_tokens={}, len(conversation)={}".format(max_tokens, cur_tokens, len(self.messages)))
                logger.debug(
                    "max_tokens={}, total_tokens={}, len(conversation)={}".format(
                        max_tokens, cur_tokens, len(self.messages)
                    )
                )
                break
            if precise:
                cur_tokens = self.calc_tokens()
            else:
                cur_tokens = len(str(self))
        return cur_tokens

    def calc_tokens(self):
        return num_tokens_from_string(str(self), self.model)


# refer to https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
def num_tokens_from_string(string: str, model: str) -> int:
    """Returns the number of tokens in a text string."""
    import tiktoken

    encoding = tiktoken.encoding_for_model(model)
    num_tokens = len(encoding.encode(string,disallowed_special=()))
    return num_tokens
    num_tokens = len(encoding.encode(string, disallowed_special=()))
    return num_tokens
@@ -2,6 +2,7 @@ from common.expired_dict import ExpiredDict
from common.log import logger
from config import conf


class Session(object):
    def __init__(self, session_id, system_prompt=None):
        self.session_id = session_id
@@ -13,7 +14,7 @@ class Session(object):
    # 重置会话
    def reset(self):
        system_item = {'role': 'system', 'content': self.system_prompt}
        system_item = {"role": "system", "content": self.system_prompt}
        self.messages = [system_item]

    def set_system_prompt(self, system_prompt):
@@ -21,13 +22,13 @@ class Session(object):
        self.reset()

    def add_query(self, query):
        user_item = {'role': 'user', 'content': query}
        user_item = {"role": "user", "content": query}
        self.messages.append(user_item)

    def add_reply(self, reply):
        assistant_item = {'role': 'assistant', 'content': reply}
        assistant_item = {"role": "assistant", "content": reply}
        self.messages.append(assistant_item)

    def discard_exceeding(self, max_tokens=None, cur_tokens=None):
        raise NotImplementedError
@@ -37,8 +38,8 @@ class Session(object):
class SessionManager(object):
    def __init__(self, sessioncls, **session_args):
        if conf().get('expires_in_seconds'):
            sessions = ExpiredDict(conf().get('expires_in_seconds'))
        if conf().get("expires_in_seconds"):
            sessions = ExpiredDict(conf().get("expires_in_seconds"))
        else:
            sessions = dict()
        self.sessions = sessions
@@ -46,20 +47,22 @@ class SessionManager(object):
        self.session_args = session_args

    def build_session(self, session_id, system_prompt=None):
        '''
        如果session_id不在sessions中,创建一个新的session并添加到sessions中
        如果system_prompt不会空,会更新session的system_prompt并重置session
        '''
        """
        如果session_id不在sessions中,创建一个新的session并添加到sessions中
        如果system_prompt不会空,会更新session的system_prompt并重置session
        """
        if session_id is None:
            return self.sessioncls(session_id, system_prompt, **self.session_args)

        if session_id not in self.sessions:
            self.sessions[session_id] = self.sessioncls(session_id, system_prompt, **self.session_args)
            self.sessions[session_id] = self.sessioncls(
                session_id, system_prompt, **self.session_args
            )
        elif system_prompt is not None: # 如果有新的system_prompt,更新并重置session
            self.sessions[session_id].set_system_prompt(system_prompt)
        session = self.sessions[session_id]
        return session

    def session_query(self, query, session_id):
        session = self.build_session(session_id)
        session.add_query(query)
@@ -68,23 +71,33 @@ class SessionManager(object):
            total_tokens = session.discard_exceeding(max_tokens, None)
            logger.debug("prompt tokens used={}".format(total_tokens))
        except Exception as e:
            logger.debug("Exception when counting tokens precisely for prompt: {}".format(str(e)))
            logger.debug(
                "Exception when counting tokens precisely for prompt: {}".format(str(e))
            )
        return session

    def session_reply(self, reply, session_id, total_tokens = None):
    def session_reply(self, reply, session_id, total_tokens=None):
        session = self.build_session(session_id)
        session.add_reply(reply)
        try:
            max_tokens = conf().get("conversation_max_tokens", 1000)
            tokens_cnt = session.discard_exceeding(max_tokens, total_tokens)
            logger.debug("raw total_tokens={}, savesession tokens={}".format(total_tokens, tokens_cnt))
            logger.debug(
                "raw total_tokens={}, savesession tokens={}".format(
                    total_tokens, tokens_cnt
                )
            )
        except Exception as e:
            logger.debug("Exception when counting tokens precisely for session: {}".format(str(e)))
            logger.debug(
                "Exception when counting tokens precisely for session: {}".format(
                    str(e)
                )
            )
        return session

    def clear_session(self, session_id):
        if session_id in self.sessions:
            del(self.sessions[session_id])
            del self.sessions[session_id]

    def clear_all_session(self):
        self.sessions.clear()
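Putting the session layer together: SessionManager keeps one Session subclass instance per session_id, trims history via discard_exceeding, and is what ChatGPTBot.reply drives earlier in this diff. A rough end-to-end sketch, not part of the commit; it assumes config is already loaded so conf() works:

# Hypothetical end-to-end sketch of the session flow shown above.
from bot.chatgpt.chat_gpt_session import ChatGPTSession
from bot.session_manager import SessionManager

sessions = SessionManager(ChatGPTSession, model="gpt-3.5-turbo")
session = sessions.session_query("hello", "user-1")             # appends the user turn
# ... send session.messages to the model ...
sessions.session_reply("hi there!", "user-1", total_tokens=42)  # appends the assistant turn
sessions.clear_session("user-1")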