Compare commits


51 Commits

Author SHA1 Message Date
vision 22d67b3a59 Merge pull request #2364 from 6vision/1031
1.7.3 release readme
2024-10-31 14:44:55 +08:00
6vision e102cbb8c4 1.7.3 release readme 2024-10-31 14:39:11 +08:00
vision d90eeb7ee4 Merge pull request #2363 from 6vision/linkai_plugin
Summary and MJ support can be configured through LinkAI platform app plugins
2024-10-31 11:50:53 +08:00
vision 1989d53031 Merge pull request #2361 from 6vision/claude_model_update
Claude model update
2024-10-31 11:50:11 +08:00
6vision 04ef0907b4 Summary and MJ support can be configured through LinkAI platform app plugins. 2024-10-31 11:15:44 +08:00
6vision 517b43561c Merge branch 'claude_model_update' of git@github.com:6vision/chatgpt-on-wechat.git into claude_model_update 2024-10-28 00:32:46 +08:00
6vision ccb8c7227f Support setting base URL and proxy for Claude model. Also support reset command. 2024-10-28 00:32:05 +08:00
vision 9fbfeeb04f Merge branch 'zhayujie:master' into claude_model_update 2024-10-27 23:43:16 +08:00
6vision 8b753a5a1f Signed-off-by: 6vision <vision_wangpc@sina.com> 2024-10-27 21:44:06 +08:00
6vision d25cab0627 Claude model supports system prompts. 2024-10-27 21:37:58 +08:00
6vision 84da0a8a35 feat:update claude-35-sonnet model 2024-10-24 20:57:03 +08:00
vision 6f665cffba Merge pull request #2354 from 6vision/group_patpat_note
fix: group patpat notes
2024-10-24 19:53:18 +08:00
6vision aea8ac2e97 Signed-off-by: 6vision <vision_wangpc@sina.com> 2024-10-24 19:48:50 +08:00
vision 8418fa7b45 Merge pull request #2344 from 6vision/markdown_format_display
Optimize markdown format display
2024-10-21 10:27:03 +08:00
6vision 9cc4d0ee07 Optimize markdown format display 2024-10-21 10:23:39 +08:00
Saboteur7 da60831c44 fix: fixed the version of qrcode dependency 2024-10-19 16:14:49 +08:00
Saboteur7 0773174a20 Merge branch 'master' of github.com:zhayujie/chatgpt-on-wechat 2024-10-19 15:55:04 +08:00
Saboteur7 70e007d8ca fix: try to solve the unresponsiveness problem 2024-10-19 15:49:57 +08:00
vision fcc4d02c2f Merge pull request #2339 from 6vision/master
Optimize Gemini model character statistics
2024-10-14 12:19:27 +08:00
vision f4a5f00593 Merge branch 'zhayujie:master' into master 2024-10-14 12:18:33 +08:00
6vision 1170ed6566 Optimize Gemini model character statistics 2024-10-14 12:17:10 +08:00
zhayujie 883f0d449b Merge pull request #2317 from 6vision/master
feat: add install.sh and run.sh
2024-09-26 16:43:56 +08:00
6vision f4c62e7844 update install.sh url 2024-09-26 16:43:12 +08:00
6vision f0d212a9d2 Merge branch 'master' of github.com:6vision/chatgpt-on-wechat 2024-09-26 16:02:19 +08:00
6vision 76a8974034 update run.sh 2024-09-26 16:01:44 +08:00
vision 0614e822f4 Merge branch 'zhayujie:master' into master 2024-09-26 13:07:45 +08:00
vision 6f682c9a2e Merge pull request #2311 from cmgzn/master
fix: gemini doesn't receive system messages...
2024-09-26 13:04:47 +08:00
6vision a9fdbc31c5 update date 2024-09-26 13:02:38 +08:00
cmgzn 086fdb5856 fix gemini logger 2024-09-26 02:49:52 +01:00
6vision 63c8ef4f17 feat: install.sh and run.sh 2024-09-26 00:34:52 +08:00
zhayujie 736f6523c7 Merge branch 'master' into master 2024-09-25 23:11:13 +08:00
vision 8b0b360d25 Merge pull request #2288 from KuroIVeko/patch-3
Support more models from Zhipu AI
2024-09-25 22:28:16 +08:00
vision 80b84e2ee6 Merge pull request #2277 from KuroIVeko/patch-1
Lower Gemini's safety thresholds
2024-09-25 22:24:20 +08:00
vision b5b7d86f7b Merge pull request #2278 from 6vision/moonshoot
fix: "model":"mooshoot", which defaults to "moonshot-v1-32k".
2024-09-25 22:10:40 +08:00
cmgzn f20d704390 fix: gemini doesn't receive system messages; change session to gpt method, add system messages as user messages to the gemini, and logging historical messages 2024-09-20 09:10:21 +01:00
vision e4e1e2e944 Merge pull request #2306 from 6vision/master
fix: Linkai voice configuration
2024-09-18 19:43:41 +08:00
vision 6bc7eeb4cc Merge branch 'zhayujie:master' into master 2024-09-18 19:41:23 +08:00
6vision 656ed5de7b fix: LinkAI voice configuration 2024-09-18 19:40:51 +08:00
zhayujie a11d695c78 Merge pull request #2300 from 6vision/master
feat: support o1-preview and o1-mini model
2024-09-13 10:50:04 +08:00
6vision c4f9acd5c5 update 2024-09-13 10:48:51 +08:00
6vision 5ef929dc42 o1 model support #model 2024-09-13 10:21:38 +08:00
6vision c8cf27b544 feat: support o1-preview and o1-mini model 2024-09-13 10:13:23 +08:00
vision bb5ecfc398 Merge pull request #2298 from 6vision/error_print_ascii_windows
Handle ASCII QR code print error on Windows
2024-09-11 22:35:30 +08:00
6vision c91e7c35bb Remove unused imports 2024-09-11 22:34:33 +08:00
6vision 532d56df2d Handle ASCII QR code print error on Windows 2024-09-11 22:30:25 +08:00
KurolVeko 111ad44029 Update const.py 2024-09-05 11:07:06 +08:00
KurolVeko 6b02bae957 Update bridge.py 2024-09-05 10:59:57 +08:00
vision 6831743416 Merge pull request #2286 from 6vision/gpt
feat: support gpt-4o-2024-08-06 model
2024-09-04 18:44:08 +08:00
6vision 63e2f42636 feat: support gpt-4o-2024-08-06 model 2024-09-04 18:39:29 +08:00
6vision f6e6805453 fix: "model":"mooshoot", which defaults to "moonshot-v1-32k". 2024-08-31 16:09:10 +08:00
KurolVeko ad77ad8f2b Lower Gemini's safety thresholds
Gemini's default safety thresholds are set too high, resulting in frequent censorship of generated text. I have lowered the thresholds for all four safety categories according to Google's documentation.
2024-08-30 17:00:51 +08:00
23 changed files with 465 additions and 92 deletions
+10 -2
View File
@@ -45,8 +45,11 @@ DEMO视频:https://cdn.link-ai.tech/doc/cow_demo.mp4
<br>
# 🏷 更新日志
>**2024.10.31** [1.7.3版本](https://github.com/zhayujie/chatgpt-on-wechat/releases/tag/1.7.3) 程序稳定性提升、数据库功能、Claude模型优化、linkai插件优化、离线通知
>**2024.08.02** [1.7.0版本](https://github.com/zhayujie/chatgpt-on-wechat/releases/tag/1.6.9) 新增 讯飞4.0 模型、知识库引用来源展示、相关插件优化
>**2024.09.26** [1.7.2版本](https://github.com/zhayujie/chatgpt-on-wechat/releases/tag/1.7.2) 和 [1.7.1版本](https://github.com/zhayujie/chatgpt-on-wechat/releases/tag/1.7.1) 文心,讯飞等模型优化、o1 模型、快速安装和管理脚本
>**2024.08.02** [1.7.0版本](https://github.com/zhayujie/chatgpt-on-wechat/releases/tag/1.7.0) 新增 讯飞4.0 模型、知识库引用来源展示、相关插件优化
>**2024.07.19** [1.6.9版本](https://github.com/zhayujie/chatgpt-on-wechat/releases/tag/1.6.9) 新增 gpt-4o-mini 模型、阿里语音识别、企微应用渠道路由优化
@@ -80,8 +83,13 @@ DEMO视频:https://cdn.link-ai.tech/doc/cow_demo.mp4
# 🚀 快速开始
快速开始详细文档:[项目搭建文档](https://docs.link-ai.tech/cow/quick-start)
- 快速开始详细文档:[项目搭建文档](https://docs.link-ai.tech/cow/quick-start)
- 快速安装脚本,详细使用指导:[一键安装启动脚本](https://github.com/zhayujie/chatgpt-on-wechat/wiki/%E4%B8%80%E9%94%AE%E5%AE%89%E8%A3%85%E5%90%AF%E5%8A%A8%E8%84%9A%E6%9C%AC)
```bash
bash <(curl -sS https://cdn.link-ai.tech/code/cow/install.sh)
```
- 项目管理脚本,详细使用指导:[项目管理脚本](https://github.com/zhayujie/chatgpt-on-wechat/wiki/%E9%A1%B9%E7%9B%AE%E7%AE%A1%E7%90%86%E8%84%9A%E6%9C%AC)
## 一、准备
### 1. 账号注册
+12 -4
View File
@@ -5,7 +5,7 @@ import time
import openai
import openai.error
import requests
from common import const
from bot.bot import Bot
from bot.chatgpt.chat_gpt_session import ChatGPTSession
from bot.openai.open_ai_image import OpenAIImage
@@ -15,7 +15,7 @@ from bridge.reply import Reply, ReplyType
from common.log import logger
from common.token_bucket import TokenBucket
from config import conf, load_config
from bot.baidu.baidu_wenxin_session import BaiduWenxinSession
# OpenAI对话模型API (可用)
class ChatGPTBot(Bot, OpenAIImage):
@@ -30,10 +30,12 @@ class ChatGPTBot(Bot, OpenAIImage):
openai.proxy = proxy
if conf().get("rate_limit_chatgpt"):
self.tb4chatgpt = TokenBucket(conf().get("rate_limit_chatgpt", 20))
conf_model = conf().get("model") or "gpt-3.5-turbo"
self.sessions = SessionManager(ChatGPTSession, model=conf().get("model") or "gpt-3.5-turbo")
# o1相关模型不支持system prompt,暂时用文心模型的session
self.args = {
"model": conf().get("model") or "gpt-3.5-turbo", # 对话模型的名称
"model": conf_model, # 对话模型的名称
"temperature": conf().get("temperature", 0.9), # 值在[0,1]之间,越大表示回复越具有不确定性
# "max_tokens":4096, # 回复最大的字符数
"top_p": conf().get("top_p", 1),
@@ -42,6 +44,12 @@ class ChatGPTBot(Bot, OpenAIImage):
"request_timeout": conf().get("request_timeout", None), # 请求超时时间,openai接口默认设置为600,对于难问题一般需要较长时间
"timeout": conf().get("request_timeout", None), # 重试超时时间,在这个时间内,将会自动重试
}
# o1相关模型固定了部分参数,暂时去掉
if conf_model in [const.O1, const.O1_MINI]:
self.sessions = SessionManager(BaiduWenxinSession, model=conf().get("model") or const.O1_MINI)
remove_keys = ["temperature", "top_p", "frequency_penalty", "presence_penalty"]
for key in remove_keys:
self.args.pop(key, None) # 如果键不存在,使用 None 来避免抛出错误
def reply(self, query, context=None):
# acquire reply content
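For reference, a compact standalone sketch of the parameter handling introduced in the hunk above: o1-family models reject the usual sampling parameters, so they are dropped from the request arguments before the API call (plain strings stand in for the const.* names added later in this compare; values are illustrative):

```python
# Illustrative only: mirrors the o1 handling above with plain strings instead of const.*
conf_model = "o1-mini"  # e.g. the configured "model" value

args = {
    "model": conf_model,
    "temperature": 0.9,
    "top_p": 1,
    "frequency_penalty": 0,
    "presence_penalty": 0,
}

if conf_model in ("o1-preview", "o1-mini"):
    # o1 models fix these parameters server-side, so remove them before calling the API.
    for key in ("temperature", "top_p", "frequency_penalty", "presence_penalty"):
        args.pop(key, None)  # None avoids a KeyError if a key is already absent

print(args)  # {'model': 'o1-mini'}
```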
+2 -2
View File
@@ -57,7 +57,7 @@ class ChatGPTSession(Session):
def num_tokens_from_messages(messages, model):
"""Returns the number of tokens used by a list of messages."""
if model in ["wenxin", "xunfei", const.GEMINI]:
if model in ["wenxin", "xunfei"] or model.startswith(const.GEMINI):
return num_tokens_by_character(messages)
import tiktoken
@@ -67,7 +67,7 @@ def num_tokens_from_messages(messages, model):
elif model in ["gpt-4-0314", "gpt-4-0613", "gpt-4-32k", "gpt-4-32k-0613", "gpt-3.5-turbo-0613",
"gpt-3.5-turbo-16k", "gpt-3.5-turbo-16k-0613", "gpt-35-turbo-16k", "gpt-4-turbo-preview",
"gpt-4-1106-preview", const.GPT4_TURBO_PREVIEW, const.GPT4_VISION_PREVIEW, const.GPT4_TURBO_01_25,
const.GPT_4o, const.GPT_4o_MINI, const.LINKAI_4o, const.LINKAI_4_TURBO]:
const.GPT_4o, const.GPT_4O_0806, const.GPT_4o_MINI, const.LINKAI_4o, const.LINKAI_4_TURBO]:
return num_tokens_from_messages(messages, model="gpt-4")
elif model.startswith("claude-3"):
return num_tokens_from_messages(messages, model="gpt-3.5-turbo")
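As background for the branch above that falls through to the GPT token counter, here is a minimal tiktoken-based sketch of counting tokens for a chat message list; the per-message overhead of 3 tokens follows OpenAI's published guidance for gpt-3.5/gpt-4 era models and should be treated as an approximation:

```python
import tiktoken

def num_tokens_sketch(messages, model="gpt-3.5-turbo"):
    """Approximate token count for a list of {"role": ..., "content": ...} messages."""
    try:
        encoding = tiktoken.encoding_for_model(model)
    except KeyError:
        encoding = tiktoken.get_encoding("cl100k_base")  # fallback encoding
    num_tokens = 0
    for message in messages:
        num_tokens += 3  # per-message wrapper tokens
        for value in message.values():
            num_tokens += len(encoding.encode(value))
    return num_tokens + 3  # replies are primed with an assistant header

print(num_tokens_sketch([{"role": "user", "content": "Hello, how are you?"}]))
```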
+17 -20
View File
@@ -8,12 +8,12 @@ import anthropic
from bot.bot import Bot
from bot.openai.open_ai_image import OpenAIImage
from bot.chatgpt.chat_gpt_session import ChatGPTSession
from bot.gemini.google_gemini_bot import GoogleGeminiBot
from bot.baidu.baidu_wenxin_session import BaiduWenxinSession
from bot.session_manager import SessionManager
from bridge.context import ContextType
from bridge.reply import Reply, ReplyType
from common.log import logger
from common import const
from config import conf
user_session = dict()
@@ -23,17 +23,14 @@ user_session = dict()
class ClaudeAPIBot(Bot, OpenAIImage):
def __init__(self):
super().__init__()
proxy = conf().get("proxy", None)
base_url = conf().get("open_ai_api_base", None) # 复用"open_ai_api_base"参数作为base_url
self.claudeClient = anthropic.Anthropic(
api_key=conf().get("claude_api_key")
api_key=conf().get("claude_api_key"),
proxies=proxy if proxy else None,
base_url=base_url if base_url else None
)
openai.api_key = conf().get("open_ai_api_key")
if conf().get("open_ai_api_base"):
openai.api_base = conf().get("open_ai_api_base")
proxy = conf().get("proxy")
if proxy:
openai.proxy = proxy
self.sessions = SessionManager(ChatGPTSession, model=conf().get("model") or "text-davinci-003")
self.sessions = SessionManager(BaiduWenxinSession, model=conf().get("model") or "text-davinci-003")
def reply(self, query, context=None):
# acquire reply content
@@ -76,14 +73,14 @@ class ClaudeAPIBot(Bot, OpenAIImage):
reply = Reply(ReplyType.ERROR, retstring)
return reply
def reply_text(self, session: ChatGPTSession, retry_count=0):
def reply_text(self, session: BaiduWenxinSession, retry_count=0):
try:
actual_model = self._model_mapping(conf().get("model"))
response = self.claudeClient.messages.create(
model=actual_model,
max_tokens=1024,
# system=conf().get("system"),
messages=GoogleGeminiBot.filter_messages(session.messages)
max_tokens=4096,
system=conf().get("character_desc", ""),
messages=session.messages
)
# response = openai.Completion.create(prompt=str(session), **self.args)
res_content = response.content[0].text.strip().replace("<|endoftext|>", "")
@@ -97,7 +94,7 @@ class ClaudeAPIBot(Bot, OpenAIImage):
}
except Exception as e:
need_retry = retry_count < 2
result = {"completion_tokens": 0, "content": "我现在有点累了,等会再来吧"}
result = {"total_tokens": 0, "completion_tokens": 0, "content": "我现在有点累了,等会再来吧"}
if isinstance(e, openai.error.RateLimitError):
logger.warn("[CLAUDE_API] RateLimitError: {}".format(e))
result["content"] = "提问太快啦,请休息一下再问我吧"
@@ -125,11 +122,11 @@ class ClaudeAPIBot(Bot, OpenAIImage):
def _model_mapping(self, model) -> str:
if model == "claude-3-opus":
return "claude-3-opus-20240229"
return const.CLAUDE_3_OPUS
elif model == "claude-3-sonnet":
return "claude-3-sonnet-20240229"
return const.CLAUDE_3_SONNET
elif model == "claude-3-haiku":
return "claude-3-haiku-20240307"
return const.CLAUDE_3_HAIKU
elif model == "claude-3.5-sonnet":
return "claude-3-5-sonnet-20240620"
return const.CLAUDE_35_SONNET
return model
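To make the Claude changes above easier to follow outside the bot class, here is a minimal sketch of the underlying Anthropic call with the base URL, proxy, and system prompt wired in the same way as the diff; the configuration values are placeholders, and the `proxies` argument is passed exactly as the diff does:

```python
import anthropic

# Placeholder values standing in for conf().get(...) lookups in the diff.
claude_api_key = "sk-ant-..."
base_url = None            # reuses the "open_ai_api_base" setting when set
proxy = None               # e.g. "http://127.0.0.1:7890"
character_desc = "You are a helpful assistant."

client = anthropic.Anthropic(
    api_key=claude_api_key,
    proxies=proxy if proxy else None,
    base_url=base_url if base_url else None,
)

response = client.messages.create(
    model="claude-3-5-sonnet-latest",   # mapped from "claude-3.5-sonnet" by _model_mapping
    max_tokens=4096,
    system=character_desc,              # system prompt now taken from character_desc
    messages=[{"role": "user", "content": "Hello"}],
)
print(response.content[0].text)
```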
+46 -12
View File
@@ -13,7 +13,9 @@ from bridge.context import ContextType, Context
from bridge.reply import Reply, ReplyType
from common.log import logger
from config import conf
from bot.chatgpt.chat_gpt_session import ChatGPTSession
from bot.baidu.baidu_wenxin_session import BaiduWenxinSession
from google.generativeai.types import HarmCategory, HarmBlockThreshold
# OpenAI对话模型API (可用)
@@ -22,8 +24,8 @@ class GoogleGeminiBot(Bot):
def __init__(self):
super().__init__()
self.api_key = conf().get("gemini_api_key")
# 复用文心的token计算方式
self.sessions = SessionManager(BaiduWenxinSession, model=conf().get("model") or "gpt-3.5-turbo")
# 复用chatGPT的token计算方式
self.sessions = SessionManager(ChatGPTSession, model=conf().get("model") or "gpt-3.5-turbo")
self.model = conf().get("model") or "gemini-pro"
if self.model == "gemini":
self.model = "gemini-pro"
@@ -36,18 +38,44 @@ class GoogleGeminiBot(Bot):
session_id = context["session_id"]
session = self.sessions.session_query(query, session_id)
gemini_messages = self._convert_to_gemini_messages(self.filter_messages(session.messages))
logger.debug(f"[Gemini] messages={gemini_messages}")
genai.configure(api_key=self.api_key)
model = genai.GenerativeModel(self.model)
response = model.generate_content(gemini_messages)
reply_text = response.text
self.sessions.session_reply(reply_text, session_id)
logger.info(f"[Gemini] reply={reply_text}")
return Reply(ReplyType.TEXT, reply_text)
# 添加安全设置
safety_settings = {
HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_NONE,
HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_NONE,
HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: HarmBlockThreshold.BLOCK_NONE,
HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_NONE,
}
# 生成回复,包含安全设置
response = model.generate_content(
gemini_messages,
safety_settings=safety_settings
)
if response.candidates and response.candidates[0].content:
reply_text = response.candidates[0].content.parts[0].text
logger.info(f"[Gemini] reply={reply_text}")
self.sessions.session_reply(reply_text, session_id)
return Reply(ReplyType.TEXT, reply_text)
else:
# 没有有效响应内容,可能内容被屏蔽,输出安全评分
logger.warning("[Gemini] No valid response generated. Checking safety ratings.")
if hasattr(response, 'candidates') and response.candidates:
for rating in response.candidates[0].safety_ratings:
logger.warning(f"Safety rating: {rating.category} - {rating.probability}")
error_message = "No valid response generated due to safety constraints."
self.sessions.session_reply(error_message, session_id)
return Reply(ReplyType.ERROR, error_message)
except Exception as e:
logger.error("[Gemini] fetch reply error, may contain unsafe content")
logger.error(e)
return Reply(ReplyType.ERROR, "invoke [Gemini] api failed!")
logger.error(f"[Gemini] Error generating response: {str(e)}", exc_info=True)
error_message = "Failed to invoke [Gemini] api!"
self.sessions.session_reply(error_message, session_id)
return Reply(ReplyType.ERROR, error_message)
def _convert_to_gemini_messages(self, messages: list):
res = []
for msg in messages:
@@ -55,6 +83,8 @@ class GoogleGeminiBot(Bot):
role = "user"
elif msg.get("role") == "assistant":
role = "model"
elif msg.get("role") == "system":
role = "user"
else:
continue
res.append({
@@ -71,7 +101,11 @@ class GoogleGeminiBot(Bot):
return res
for i in range(len(messages) - 1, -1, -1):
message = messages[i]
if message.get("role") != turn:
role = message.get("role")
if role == "system":
res.insert(0, message)
continue
if role != turn:
continue
res.insert(0, message)
if turn == "user":
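A standalone sketch of the safety-settings call that the Gemini diff above introduces, using google-generativeai directly; the API key and model name are placeholders:

```python
import google.generativeai as genai
from google.generativeai.types import HarmCategory, HarmBlockThreshold

genai.configure(api_key="YOUR_GEMINI_API_KEY")  # placeholder key
model = genai.GenerativeModel("gemini-1.5-pro")

# Relax all four safety categories, mirroring the diff above.
safety_settings = {
    HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_NONE,
    HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_NONE,
    HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: HarmBlockThreshold.BLOCK_NONE,
    HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_NONE,
}

response = model.generate_content(
    [{"role": "user", "parts": [{"text": "Hello"}]}],
    safety_settings=safety_settings,
)

if response.candidates and response.candidates[0].content:
    print(response.candidates[0].content.parts[0].text)
elif response.candidates:
    # When content is blocked, the safety ratings explain which category triggered it.
    for rating in response.candidates[0].safety_ratings:
        print(rating.category, rating.probability)
```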
+4 -1
View File
@@ -19,8 +19,11 @@ class MoonshotBot(Bot):
def __init__(self):
super().__init__()
self.sessions = SessionManager(MoonshotSession, model=conf().get("model") or "moonshot-v1-128k")
model = conf().get("model") or "moonshot-v1-128k"
if model == "moonshot":
model = "moonshot-v1-32k"
self.args = {
"model": conf().get("model") or "moonshot-v1-128k", # 对话模型的名称
"model": model, # 对话模型的名称
"temperature": conf().get("temperature", 0.3), # 如果设置,值域须为 [0, 1] 我们推荐 0.3,以达到较合适的效果。
"top_p": conf().get("top_p", 1.0), # 使用默认值
}
+2 -2
View File
@@ -38,7 +38,7 @@ class Bridge(object):
self.btype["chat"] = const.QWEN_DASHSCOPE
if model_type and model_type.startswith("gemini"):
self.btype["chat"] = const.GEMINI
if model_type in [const.ZHIPU_AI]:
if model_type and model_type.startswith("glm"):
self.btype["chat"] = const.ZHIPU_AI
if model_type and model_type.startswith("claude-3"):
self.btype["chat"] = const.CLAUDEAPI
@@ -46,7 +46,7 @@ class Bridge(object):
if model_type in ["claude"]:
self.btype["chat"] = const.CLAUDEAI
if model_type in ["moonshot-v1-8k", "moonshot-v1-32k", "moonshot-v1-128k"]:
if model_type in [const.MOONSHOT, "moonshot-v1-8k", "moonshot-v1-32k", "moonshot-v1-128k"]:
self.btype["chat"] = const.MOONSHOT
if model_type in ["abab6.5-chat"]:
+14 -11
View File
@@ -337,24 +337,27 @@ class ChatChannel(Channel):
while True:
with self.lock:
session_ids = list(self.sessions.keys())
for session_id in session_ids:
for session_id in session_ids:
with self.lock:
context_queue, semaphore = self.sessions[session_id]
if semaphore.acquire(blocking=False): # 等线程处理完毕才能删除
if not context_queue.empty():
context = context_queue.get()
logger.debug("[chat_channel] consume context: {}".format(context))
future: Future = handler_pool.submit(self._handle, context)
future.add_done_callback(self._thread_pool_callback(session_id, context=context))
if semaphore.acquire(blocking=False): # 等线程处理完毕才能删除
if not context_queue.empty():
context = context_queue.get()
logger.debug("[chat_channel] consume context: {}".format(context))
future: Future = handler_pool.submit(self._handle, context)
future.add_done_callback(self._thread_pool_callback(session_id, context=context))
with self.lock:
if session_id not in self.futures:
self.futures[session_id] = []
self.futures[session_id].append(future)
elif semaphore._initial_value == semaphore._value + 1: # 除了当前,没有任务再申请到信号量,说明所有任务都处理完毕
elif semaphore._initial_value == semaphore._value + 1: # 除了当前,没有任务再申请到信号量,说明所有任务都处理完毕
with self.lock:
self.futures[session_id] = [t for t in self.futures[session_id] if not t.done()]
assert len(self.futures[session_id]) == 0, "thread pool error"
del self.sessions[session_id]
else:
semaphore.release()
time.sleep(0.1)
else:
semaphore.release()
time.sleep(0.2)
# 取消session_id对应的所有任务,只能取消排队的消息和已提交线程池但未执行的任务
def cancel_session(self, session_id):
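For readers unfamiliar with the pattern being re-indented above, here is a minimal standalone sketch of the non-blocking per-session semaphore consumption it implements; all names are hypothetical stand-ins for the channel internals:

```python
import queue
import threading
from concurrent.futures import ThreadPoolExecutor

# One (queue, semaphore) pair per session; the semaphore caps concurrent handlers.
sessions = {"session-1": (queue.Queue(), threading.BoundedSemaphore(4))}
pool = ThreadPoolExecutor(max_workers=8)

def consume_once(session_id: str) -> None:
    context_queue, semaphore = sessions[session_id]
    if semaphore.acquire(blocking=False):      # only take work if a slot is free
        if not context_queue.empty():
            context = context_queue.get()
            future = pool.submit(print, f"handle {context}")  # stand-in for _handle
            # Release the slot once the handler thread finishes.
            future.add_done_callback(lambda _f: semaphore.release())
        else:
            semaphore.release()                # nothing queued, give the slot back
```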
+7 -2
View File
@@ -20,7 +20,7 @@ from common.expired_dict import ExpiredDict
from common.log import logger
from common.singleton import singleton
from common.time_check import time_checker
from common.utils import convert_webp_to_png
from common.utils import convert_webp_to_png, remove_markdown_symbol
from config import conf, get_appdata_dir
from lib import itchat
from lib.itchat.content import *
@@ -100,7 +100,10 @@ def qrCallback(uuid, status, qrcode):
qr = qrcode.QRCode(border=1)
qr.add_data(url)
qr.make(fit=True)
qr.print_ascii(invert=True)
try:
qr.print_ascii(invert=True)
except UnicodeEncodeError:
print("ASCII QR code printing failed due to encoding issues.")
@singleton
@@ -210,9 +213,11 @@ class WechatChannel(ChatChannel):
def send(self, reply: Reply, context: Context):
receiver = context["receiver"]
if reply.type == ReplyType.TEXT:
reply.content = remove_markdown_symbol(reply.content)
itchat.send(reply.content, toUserName=receiver)
logger.info("[WX] sendMsg={}, receiver={}".format(reply, receiver))
elif reply.type == ReplyType.ERROR or reply.type == ReplyType.INFO:
reply.content = remove_markdown_symbol(reply.content)
itchat.send(reply.content, toUserName=receiver)
logger.info("[WX] sendMsg={}, receiver={}".format(reply, receiver))
elif reply.type == ReplyType.VOICE:
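A minimal reproduction of the guarded ASCII QR printing added in the hunk above, using the qrcode package; the login URL is a placeholder:

```python
import qrcode

url = "https://login.weixin.qq.com/l/EXAMPLE"  # placeholder login URL
qr = qrcode.QRCode(border=1)
qr.add_data(url)
qr.make(fit=True)
try:
    qr.print_ascii(invert=True)
except UnicodeEncodeError:
    # Some Windows consoles cannot encode the block characters of the ASCII QR code.
    print("ASCII QR code printing failed due to encoding issues.")
```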
+10 -5
View File
@@ -55,6 +55,16 @@ class WechatMessage(ChatMessage):
self.ctype = ContextType.EXIT_GROUP
self.content = itchat_msg["Content"]
self.actual_user_nickname = re.findall(r"\"(.*?)\"", itchat_msg["Content"])[0]
elif any(note_patpat in itchat_msg["Content"] for note_patpat in notes_patpat): # 若有任何在notes_patpat列表中的字符串出现在NOTE中:
self.ctype = ContextType.PATPAT
self.content = itchat_msg["Content"]
if "拍了拍我" in itchat_msg["Content"]: # 识别中文
self.actual_user_nickname = re.findall(r"\"(.*?)\"", itchat_msg["Content"])[0]
elif "tickled my" in itchat_msg["Content"] or "tickled me" in itchat_msg["Content"]:
self.actual_user_nickname = re.findall(r'^(.*?)(?:tickled my|tickled me)', itchat_msg["Content"])[0]
else:
raise NotImplementedError("Unsupported note message: " + itchat_msg["Content"])
elif "你已添加了" in itchat_msg["Content"]: #通过好友请求
self.ctype = ContextType.ACCEPT_FRIEND
@@ -62,11 +72,6 @@ class WechatMessage(ChatMessage):
elif any(note_patpat in itchat_msg["Content"] for note_patpat in notes_patpat): # 若有任何在notes_patpat列表中的字符串出现在NOTE中:
self.ctype = ContextType.PATPAT
self.content = itchat_msg["Content"]
if is_group:
if "拍了拍我" in itchat_msg["Content"]: # 识别中文
self.actual_user_nickname = re.findall(r"\"(.*?)\"", itchat_msg["Content"])[0]
elif ("tickled my" in itchat_msg["Content"] or "tickled me" in itchat_msg["Content"]):
self.actual_user_nickname = re.findall(r'^(.*?)(?:tickled my|tickled me)', itchat_msg["Content"])[0]
else:
raise NotImplementedError("Unsupported note message: " + itchat_msg["Content"])
elif itchat_msg["Type"] == ATTACHMENT:
+2 -2
View File
@@ -17,7 +17,7 @@ from channel.wechatcom.wechatcomapp_client import WechatComAppClient
from channel.wechatcom.wechatcomapp_message import WechatComAppMessage
from common.log import logger
from common.singleton import singleton
from common.utils import compress_imgfile, fsize, split_string_by_utf8_length, convert_webp_to_png
from common.utils import compress_imgfile, fsize, split_string_by_utf8_length, convert_webp_to_png, remove_markdown_symbol
from config import conf, subscribe_msg
from voice.audio_convert import any_to_amr, split_audio
@@ -52,7 +52,7 @@ class WechatComAppChannel(ChatChannel):
def send(self, reply: Reply, context: Context):
receiver = context["receiver"]
if reply.type in [ReplyType.TEXT, ReplyType.ERROR, ReplyType.INFO]:
reply_text = reply.content
reply_text = remove_markdown_symbol(reply.content)
texts = split_string_by_utf8_length(reply_text, MAX_UTF8_LEN)
if len(texts) > 1:
logger.info("[wechatcom] text too long, split into {} parts".format(len(texts)))
+2 -2
View File
@@ -19,7 +19,7 @@ from channel.wechatmp.common import *
from channel.wechatmp.wechatmp_client import WechatMPClient
from common.log import logger
from common.singleton import singleton
from common.utils import split_string_by_utf8_length
from common.utils import split_string_by_utf8_length, remove_markdown_symbol
from config import conf
from voice.audio_convert import any_to_mp3, split_audio
@@ -81,7 +81,7 @@ class WechatMPChannel(ChatChannel):
receiver = context["receiver"]
if self.passive_reply:
if reply.type == ReplyType.TEXT or reply.type == ReplyType.INFO or reply.type == ReplyType.ERROR:
reply_text = reply.content
reply_text = remove_markdown_symbol(reply.content)
logger.info("[wechatmp] text cached, receiver {}\n{}".format(receiver, reply_text))
self.cache_dict[receiver].append(("text", reply_text))
elif reply.type == ReplyType.VOICE:
+29 -3
View File
@@ -24,6 +24,7 @@ GPT35_0125 = "gpt-3.5-turbo-0125"
GPT35_1106 = "gpt-3.5-turbo-1106"
GPT_4o = "gpt-4o"
GPT_4O_0806 = "gpt-4o-2024-08-06"
GPT4_TURBO = "gpt-4-turbo"
GPT4_TURBO_PREVIEW = "gpt-4-turbo-preview"
GPT4_TURBO_04_09 = "gpt-4-turbo-2024-04-09"
@@ -37,6 +38,9 @@ GPT4_32k = "gpt-4-32k"
GPT4_06_13 = "gpt-4-0613"
GPT4_32k_06_13 = "gpt-4-32k-0613"
O1 = "o1-preview"
O1_MINI = "o1-mini"
WHISPER_1 = "whisper-1"
TTS_1 = "tts-1"
TTS_1_HD = "tts-1-hd"
@@ -56,13 +60,35 @@ GEMINI_PRO = "gemini-1.0-pro"
GEMINI_15_flash = "gemini-1.5-flash"
GEMINI_15_PRO = "gemini-1.5-pro"
GLM_4 = "glm-4"
GLM_4_PLUS = "glm-4-plus"
GLM_4_flash = "glm-4-flash"
GLM_4_LONG = "glm-4-long"
GLM_4_ALLTOOLS = "glm-4-alltools"
GLM_4_0520 = "glm-4-0520"
GLM_4_AIR = "glm-4-air"
GLM_4_AIRX = "glm-4-airx"
CLAUDE_3_OPUS = "claude-3-opus-latest"
CLAUDE_3_OPUS_0229 = "claude-3-opus-20240229"
CLAUDE_35_SONNET = "claude-3-5-sonnet-latest" # 带 latest 标签的模型名称,会不断更新指向最新发布的模型
CLAUDE_35_SONNET_1022 = "claude-3-5-sonnet-20241022" # 带具体日期的模型名称,会固定为该日期发布的模型
CLAUDE_35_SONNET_0620 = "claude-3-5-sonnet-20240620"
CLAUDE_3_SONNET = "claude-3-sonnet-20240229"
CLAUDE_3_HAIKU = "claude-3-haiku-20240307"
MODEL_LIST = [
GPT35, GPT35_0125, GPT35_1106, "gpt-3.5-turbo-16k",
GPT_4o, GPT_4o_MINI, GPT4_TURBO, GPT4_TURBO_PREVIEW, GPT4_TURBO_01_25, GPT4_TURBO_11_06, GPT4, GPT4_32k, GPT4_06_13, GPT4_32k_06_13,
O1, O1_MINI, GPT_4o, GPT_4O_0806, GPT_4o_MINI, GPT4_TURBO, GPT4_TURBO_PREVIEW, GPT4_TURBO_01_25, GPT4_TURBO_11_06, GPT4, GPT4_32k, GPT4_06_13, GPT4_32k_06_13,
WEN_XIN, WEN_XIN_4,
XUNFEI, ZHIPU_AI, MOONSHOT, MiniMax,
XUNFEI,
ZHIPU_AI, GLM_4, GLM_4_PLUS, GLM_4_flash, GLM_4_LONG, GLM_4_ALLTOOLS, GLM_4_0520, GLM_4_AIR, GLM_4_AIRX,
MOONSHOT, MiniMax,
GEMINI, GEMINI_PRO, GEMINI_15_flash, GEMINI_15_PRO,
"claude", "claude-3-haiku", "claude-3-sonnet", "claude-3-opus", "claude-3-opus-20240229", "claude-3.5-sonnet",
CLAUDE_3_OPUS, CLAUDE_3_OPUS_0229, CLAUDE_35_SONNET, CLAUDE_35_SONNET_1022, CLAUDE_35_SONNET_0620, CLAUDE_3_SONNET, CLAUDE_3_HAIKU, "claude", "claude-3-haiku", "claude-3-sonnet", "claude-3-opus", "claude-3.5-sonnet",
"moonshot-v1-8k", "moonshot-v1-32k", "moonshot-v1-128k",
QWEN, QWEN_TURBO, QWEN_PLUS, QWEN_MAX,
LINKAI_35, LINKAI_4_TURBO, LINKAI_4o
+5
View File
@@ -42,8 +42,13 @@ class ChatClient(LinkAIClient):
if reply_voice_mode:
if reply_voice_mode == "voice_reply_voice":
local_config["voice_reply_voice"] = True
local_config["always_reply_voice"] = False
elif reply_voice_mode == "always_reply_voice":
local_config["always_reply_voice"] = True
local_config["voice_reply_voice"] = True
elif reply_voice_mode == "no_reply_voice":
local_config["always_reply_voice"] = False
local_config["voice_reply_voice"] = False
if config.get("admin_password"):
if not plugin_config.get("Godcmd"):
+8
View File
@@ -1,5 +1,6 @@
import io
import os
import re
from urllib.parse import urlparse
from PIL import Image
from common.log import logger
@@ -68,3 +69,10 @@ def convert_webp_to_png(webp_image):
except Exception as e:
logger.error(f"Failed to convert WEBP to PNG: {e}")
raise
def remove_markdown_symbol(text: str):
# 移除markdown格式,目前先移除**
if not text:
return text
return re.sub(r'\*\*(.*?)\*\*', r'\1', text)
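A quick usage check of the helper added above; currently it only strips ** bold markers:

```python
import re

def remove_markdown_symbol(text: str):
    # Strip markdown bold markers; only ** is handled for now.
    if not text:
        return text
    return re.sub(r'\*\*(.*?)\*\*', r'\1', text)

assert remove_markdown_symbol("**Hello** world") == "Hello world"
assert remove_markdown_symbol("") == ""
```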
+1 -1
View File
@@ -313,7 +313,7 @@ class Godcmd(Plugin):
except Exception as e:
ok, result = False, "你没有设置私有GPT模型"
elif cmd == "reset":
if bottype in [const.OPEN_AI, const.CHATGPT, const.CHATGPTONAZURE, const.LINKAI, const.BAIDU, const.XUNFEI, const.QWEN, const.GEMINI, const.ZHIPU_AI]:
if bottype in [const.OPEN_AI, const.CHATGPT, const.CHATGPTONAZURE, const.LINKAI, const.BAIDU, const.XUNFEI, const.QWEN, const.GEMINI, const.ZHIPU_AI, const.CLAUDEAPI]:
bot.sessions.clear_session(session_id)
if Bridge().chat_bots.get(bottype):
Bridge().chat_bots.get(bottype).sessions.clear_session(session_id)
+2
View File
@@ -98,6 +98,8 @@
如果不想创建 `plugins/linkai/config.json` 配置,可以直接通过 `$linkai sum open` 指令开启该功能。
也可以通过私聊(全局 `config.json` 中的 `linkai_app_code`)或者群聊绑定(通过`group_app_map`参数配置)的应用来开启该功能:在LinkAI平台 [应用配置](https://link-ai.tech/console/factory) 里添加并开启**内容总结**插件。
#### 使用
功能开启后,向机器人发送 **文件**、**分享链接卡片**、**图片** 即可生成摘要,进一步可以与文件或链接的内容进行多轮对话。如果需要关闭某种类型的内容总结,设置 `summary`配置中的type字段即可。
+48 -13
View File
@@ -9,7 +9,7 @@ from common.expired_dict import ExpiredDict
from common import const
import os
from .utils import Util
from config import plugin_config
from config import plugin_config, conf
@plugins.register(
@@ -28,7 +28,7 @@ class LinkAI(Plugin):
# 未加载到配置,使用模板中的配置
self.config = self._load_config_template()
if self.config:
self.mj_bot = MJBot(self.config.get("midjourney"))
self.mj_bot = MJBot(self.config.get("midjourney"), self._fetch_group_app_code)
self.sum_config = {}
if self.config:
self.sum_config = self.config.get("summary")
@@ -56,7 +56,8 @@ class LinkAI(Plugin):
return
if context.type != ContextType.IMAGE:
_send_info(e_context, "正在为你加速生成摘要,请稍后")
res = LinkSummary().summary_file(file_path)
app_code = self._fetch_app_code(context)
res = LinkSummary().summary_file(file_path, app_code)
if not res:
if context.type != ContextType.IMAGE:
_set_reply_text("因为神秘力量无法获取内容,请稍后再试吧", e_context, level=ReplyType.TEXT)
@@ -74,7 +75,8 @@ class LinkAI(Plugin):
if not LinkSummary().check_url(context.content):
return
_send_info(e_context, "正在为你加速生成摘要,请稍后")
res = LinkSummary().summary_url(context.content)
app_code = self._fetch_app_code(context)
res = LinkSummary().summary_url(context.content, app_code)
if not res:
_set_reply_text("因为神秘力量无法获取文章内容,请稍后再试吧~", e_context, level=ReplyType.TEXT)
return
@@ -169,7 +171,7 @@ class LinkAI(Plugin):
return
if len(cmd) == 3 and cmd[1] == "sum" and (cmd[2] == "open" or cmd[2] == "close"):
# 知识库开关指令
# 总结对话开关指令
if not Util.is_admin(e_context):
_set_reply_text("需要管理员权限执行", e_context, level=ReplyType.ERROR)
return
@@ -192,14 +194,34 @@ class LinkAI(Plugin):
return
def _is_summary_open(self, context) -> bool:
if not self.sum_config or not self.sum_config.get("enabled"):
return False
if context.kwargs.get("isgroup") and not self.sum_config.get("group_enabled"):
return False
support_type = self.sum_config.get("type") or ["FILE", "SHARING"]
if context.type.name not in support_type and context.type.name != "TEXT":
return False
return True
# 获取远程应用插件状态
remote_enabled = False
if context.kwargs.get("isgroup"):
# 群聊场景只查询群对应的app_code
group_name = context.get("msg").from_user_nickname
app_code = self._fetch_group_app_code(group_name)
if app_code:
remote_enabled = Util.fetch_app_plugin(app_code, "内容总结")
else:
# 非群聊场景使用全局app_code
app_code = conf().get("linkai_app_code")
if app_code:
remote_enabled = Util.fetch_app_plugin(app_code, "内容总结")
# 基础条件:总开关开启且消息类型符合要求
base_enabled = (
self.sum_config
and self.sum_config.get("enabled")
and (context.type.name in (
self.sum_config.get("type") or ["FILE", "SHARING"]) or context.type.name == "TEXT")
)
# 群聊:需要满足(总开关和群开关)或远程插件开启
if context.kwargs.get("isgroup"):
return (base_enabled and self.sum_config.get("group_enabled")) or remote_enabled
# 非群聊:只需要满足总开关或远程插件开启
return base_enabled or remote_enabled
# LinkAI 对话任务处理
def _is_chat_task(self, e_context: EventContext):
@@ -230,6 +252,19 @@ class LinkAI(Plugin):
app_code = group_mapping.get(group_name) or group_mapping.get("ALL_GROUP")
return app_code
def _fetch_app_code(self, context) -> str:
"""
根据主配置或者群聊名称获取对应的应用code,优先获取群聊配置的应用code
:param context: 上下文
:return: 应用code
"""
app_code = conf().get("linkai_app_code")
if context.kwargs.get("isgroup"):
# 群聊场景只查询群对应的app_code
group_name = context.get("msg").from_user_nickname
app_code = self._fetch_group_app_code(group_name)
return app_code
def get_help_text(self, verbose=False, **kwargs):
trigger_prefix = _get_trigger_prefix()
help_text = "用于集成 LinkAI 提供的知识库、Midjourney绘画、文档总结、联网搜索等能力。\n\n"
+25 -4
View File
@@ -10,6 +10,7 @@ from bridge.context import ContextType
from plugins import EventContext, EventAction
from .utils import Util
INVALID_REQUEST = 410
NOT_FOUND_ORIGIN_IMAGE = 461
NOT_FOUND_TASK = 462
@@ -67,10 +68,11 @@ class MJTask:
# midjourney bot
class MJBot:
def __init__(self, config):
def __init__(self, config, fetch_group_app_code):
self.base_url = conf().get("linkai_api_base", "https://api.link-ai.tech") + "/v1/img/midjourney"
self.headers = {"Authorization": "Bearer " + conf().get("linkai_api_key")}
self.config = config
self.fetch_group_app_code = fetch_group_app_code
self.tasks = {}
self.temp_dict = {}
self.tasks_lock = threading.Lock()
@@ -98,7 +100,7 @@ class MJBot:
return TaskType.VARIATION
elif cmd_list[0].lower() == f"{trigger_prefix}mjr":
return TaskType.RESET
elif context.type == ContextType.IMAGE_CREATE and self.config.get("use_image_create_prefix") and self.config.get("enabled"):
elif context.type == ContextType.IMAGE_CREATE and self.config.get("use_image_create_prefix") and self._is_mj_open(context):
return TaskType.GENERATE
def process_mj_task(self, mj_type: TaskType, e_context: EventContext):
@@ -129,8 +131,8 @@ class MJBot:
self._set_reply_text(f"Midjourney绘画已{tips_text}", e_context, level=ReplyType.INFO)
return
if not self.config.get("enabled"):
logger.warn("Midjourney绘画未开启,请查看 plugins/linkai/config.json 中的配置")
if not self._is_mj_open(context):
logger.warn("Midjourney绘画未开启,请查看 plugins/linkai/config.json 中的配置,或者在LinkAI平台 应用中添加/打开”MJ“插件")
self._set_reply_text(f"Midjourney绘画未开启", e_context, level=ReplyType.INFO)
return
@@ -409,6 +411,25 @@ class MJBot:
result.append(task)
return result
def _is_mj_open(self, context) -> bool:
# 获取远程应用插件状态
remote_enabled = False
if context.kwargs.get("isgroup"):
# 群聊场景只查询群对应的app_code
group_name = context.get("msg").from_user_nickname
app_code = self.fetch_group_app_code(group_name)
if app_code:
remote_enabled = Util.fetch_app_plugin(app_code, "Midjourney")
else:
# 非群聊场景使用全局app_code
app_code = conf().get("linkai_app_code")
if app_code:
remote_enabled = Util.fetch_app_plugin(app_code, "Midjourney")
# 本地配置
base_enabled = self.config.get("enabled")
return base_enabled or remote_enabled
def _send(channel, reply: Reply, context, retry_cnt=0):
try:
+5 -3
View File
@@ -9,19 +9,21 @@ class LinkSummary:
def __init__(self):
pass
def summary_file(self, file_path: str):
def summary_file(self, file_path: str, app_code: str):
file_body = {
"file": open(file_path, "rb"),
"name": file_path.split("/")[-1],
"app_code": app_code
}
url = self.base_url() + "/v1/summary/file"
res = requests.post(url, headers=self.headers(), files=file_body, timeout=(5, 300))
return self._parse_summary_res(res)
def summary_url(self, url: str):
def summary_url(self, url: str, app_code: str):
url = html.unescape(url)
body = {
"url": url
"url": url,
"app_code": app_code
}
res = requests.post(url=self.base_url() + "/v1/summary/url", headers=self.headers(), json=body, timeout=(5, 180))
return self._parse_summary_res(res)
+20 -1
View File
@@ -1,7 +1,9 @@
import requests
from common.log import logger
from config import global_config
from bridge.reply import Reply, ReplyType
from plugins.event import EventContext, EventAction
from config import conf
class Util:
@staticmethod
@@ -26,3 +28,20 @@ class Util:
reply = Reply(level, content)
e_context["reply"] = reply
e_context.action = EventAction.BREAK_PASS
@staticmethod
def fetch_app_plugin(app_code: str, plugin_name: str) -> bool:
headers = {"Authorization": "Bearer " + conf().get("linkai_api_key")}
# do http request
base_url = conf().get("linkai_api_base", "https://api.link-ai.tech")
params = {"app_code": app_code}
res = requests.get(url=base_url + "/v1/app/info", params=params, headers=headers, timeout=(5, 10))
if res.status_code == 200:
plugins = res.json().get("data").get("plugins")
for plugin in plugins:
if plugin.get("name") and plugin.get("name") == plugin_name:
return True
return False
else:
logger.warning(f"[LinkAI] find app info exception, res={res}")
return False
+2 -2
View File
@@ -1,7 +1,7 @@
openai==0.27.8
HTMLParser>=0.0.2
PyQRCode>=1.2.1
qrcode>=7.4.2
PyQRCode==1.2.1
qrcode==7.4.2
requests>=2.28.2
chardet>=5.1.0
Pillow
+192
View File
@@ -0,0 +1,192 @@
#!/usr/bin/env bash
set -e
# 颜色定义
RED='\033[0;31m' # 红色
GREEN='\033[0;32m' # 绿色
YELLOW='\033[0;33m' # 黄色
BLUE='\033[0;34m' # 蓝色
NC='\033[0m' # 无颜色
# 获取当前脚本的目录
export BASE_DIR=$(cd "$(dirname "$0")"; pwd)
echo -e "${GREEN}📁 BASE_DIR: ${BASE_DIR}${NC}"
# 检查 config.json 文件是否存在
check_config_file() {
if [ ! -f "${BASE_DIR}/config.json" ]; then
echo -e "${RED}❌ 错误:未找到 config.json 文件。请确保 config.json 存在于当前目录。${NC}"
exit 1
fi
}
# 检查 Python 版本是否大于等于 3.7,并检查 pip 是否可用
check_python_version() {
if ! command -v python3 &> /dev/null; then
echo -e "${RED}❌ 错误:未找到 Python3。请安装 Python 3.7 或以上版本。${NC}"
exit 1
fi
PYTHON_VERSION=$(python3 -c 'import sys; print(f"{sys.version_info.major}.{sys.version_info.minor}")')
PYTHON_MAJOR=$(echo "$PYTHON_VERSION" | cut -d'.' -f1)
PYTHON_MINOR=$(echo "$PYTHON_VERSION" | cut -d'.' -f2)
if (( PYTHON_MAJOR < 3 || (PYTHON_MAJOR == 3 && PYTHON_MINOR < 7) )); then
echo -e "${RED}❌ 错误:Python 版本为 ${PYTHON_VERSION}。请安装 Python 3.7 或以上版本。${NC}"
exit 1
fi
if ! python3 -m pip --version &> /dev/null; then
echo -e "${RED}❌ 错误:未找到 pip。请安装 pip。${NC}"
exit 1
fi
}
# 检查并安装缺失的依赖
install_dependencies() {
echo -e "${YELLOW}⏳ 正在安装依赖...${NC}"
if [ ! -f "${BASE_DIR}/requirements.txt" ]; then
echo -e "${RED}❌ 错误:未找到 requirements.txt 文件。${NC}"
exit 1
fi
# 安装 requirements.txt 中的依赖,使用清华大学的 PyPI 镜像
pip3 install -r "${BASE_DIR}/requirements.txt" -i https://pypi.tuna.tsinghua.edu.cn/simple
# 处理 requirements-optional.txt(如果存在)
if [ -f "${BASE_DIR}/requirements-optional.txt" ]; then
echo -e "${YELLOW}⏳ 正在安装可选的依赖...${NC}"
pip3 install -r "${BASE_DIR}/requirements-optional.txt" -i https://pypi.tuna.tsinghua.edu.cn/simple
fi
}
# 启动项目
run_project() {
echo -e "${GREEN}🚀 准备启动项目...${NC}"
cd "${BASE_DIR}"
sleep 2
# 判断操作系统类型
OS_TYPE=$(uname)
if [[ "$OS_TYPE" == "Linux" ]]; then
# 在 Linux 上使用 setsid
setsid python3 "${BASE_DIR}/app.py" > "${BASE_DIR}/nohup.out" 2>&1 &
echo -e "${GREEN}🚀 正在启动 ChatGPT-on-WeChat (Linux)...${NC}"
elif [[ "$OS_TYPE" == "Darwin" ]]; then
# 在 macOS 上直接运行
python3 "${BASE_DIR}/app.py" > "${BASE_DIR}/nohup.out" 2>&1 &
echo -e "${GREEN}🚀 正在启动 ChatGPT-on-WeChat (macOS)...${NC}"
else
echo -e "${RED}❌ 错误:不支持的操作系统 ${OS_TYPE}${NC}"
exit 1
fi
sleep 2
# 显示日志输出,供用户扫码
tail -n 30 -f "${BASE_DIR}/nohup.out"
}
# 更新项目
update_project() {
echo -e "${GREEN}🔄 准备更新项目,现在停止项目...${NC}"
cd "${BASE_DIR}"
# 停止项目
stop_project
echo -e "${GREEN}🔄 开始更新项目...${NC}"
# 更新代码,从 git 仓库拉取最新代码
if [ -d .git ]; then
GIT_PULL_OUTPUT=$(git pull)
if [ $? -eq 0 ]; then
if [[ "$GIT_PULL_OUTPUT" == *"Already up to date."* ]]; then
echo -e "${GREEN}✅ 代码已经是最新的。${NC}"
else
echo -e "${GREEN}✅ 代码更新完成。${NC}"
fi
else
echo -e "${YELLOW}⚠️ 从 GitHub 更新失败,尝试切换到 Gitee 仓库...${NC}"
# 更改远程仓库为 Gitee
git remote set-url origin https://gitee.com/zhayujie/chatgpt-on-wechat.git
GIT_PULL_OUTPUT=$(git pull)
if [ $? -eq 0 ]; then
if [[ "$GIT_PULL_OUTPUT" == *"Already up to date."* ]]; then
echo -e "${GREEN}✅ 代码已经是最新的。${NC}"
else
echo -e "${GREEN}✅ 从 Gitee 更新成功。${NC}"
fi
else
echo -e "${RED}❌ 错误:从 Gitee 更新仍然失败,请检查网络连接。${NC}"
exit 1
fi
fi
else
echo -e "${RED}❌ 错误:当前目录不是 git 仓库,无法更新代码。${NC}"
exit 1
fi
# 安装依赖
install_dependencies
# 启动项目
run_project
}
# 停止项目
stop_project() {
echo -e "${GREEN}🛑 正在停止项目...${NC}"
cd "${BASE_DIR}"
pid=$(ps ax | grep -i app.py | grep "${BASE_DIR}" | grep python3 | grep -v grep | awk '{print $1}')
if [ -z "$pid" ] ; then
echo -e "${YELLOW}⚠️ 未找到正在运行的 ChatGPT-on-WeChat。${NC}"
return
fi
echo -e "${GREEN}🛑 正在运行的 ChatGPT-on-WeChat (PID: ${pid})${NC}"
kill ${pid}
sleep 3
if ps -p $pid > /dev/null; then
echo -e "${YELLOW}⚠️ 进程未停止,尝试强制终止...${NC}"
kill -9 ${pid}
fi
echo -e "${GREEN}✅ 已停止 ChatGPT-on-WeChat (PID: ${pid})${NC}"
}
# 主函数,根据用户参数执行操作
case "$1" in
start)
check_config_file
check_python_version
run_project
;;
stop)
stop_project
;;
restart)
stop_project
check_config_file
check_python_version
run_project
;;
update)
check_config_file
check_python_version
update_project
;;
*)
echo -e "${YELLOW}=========================================${NC}"
echo -e "${YELLOW}用法:${GREEN}$0 ${BLUE}{start|stop|restart|update}${NC}"
echo -e "${YELLOW}示例:${NC}"
echo -e " ${GREEN}$0 ${BLUE}start${NC}"
echo -e " ${GREEN}$0 ${BLUE}stop${NC}"
echo -e " ${GREEN}$0 ${BLUE}restart${NC}"
echo -e " ${GREEN}$0 ${BLUE}update${NC}"
echo -e "${YELLOW}=========================================${NC}"
exit 1
;;
esac
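Typical invocations of this management script, assuming it is saved as run.sh in the project root next to config.json:

```bash
bash run.sh start     # check config.json and Python, then launch app.py in the background
bash run.sh stop      # stop the running ChatGPT-on-WeChat process
bash run.sh restart   # stop, re-check prerequisites, and start again
bash run.sh update    # stop, git pull (GitHub with Gitee fallback), reinstall deps, restart
```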