Compare commits

...

61 Commits

Author SHA1 Message Date
lanvent 9d2cb75c84 fix(docker): chown /usr/local/lib in debian dockerfile 2023-05-10 23:12:43 +08:00
Jianglang f971505c4a Update README.md 2023-05-09 23:29:03 +08:00
lanvent 2133c1d6af fix(Dockerfile): create /home/noroot directory and change ownership of it 2023-05-09 23:08:20 +08:00
Jianglang 0bf06ddfd3 Merge pull request #1046 from theLastWinner/master
fix(企业微信):补充缺失依赖textwrap
2023-05-08 17:33:46 +08:00
Jianglang 024a50d642 Merge pull request #1045 from wqh0109663/master
fix docker entrypoint
2023-05-08 17:33:22 +08:00
林督翔 e4eebd64d1 fix(企业微信):补充缺失依赖textwrap 2023-05-08 09:39:32 +08:00
wuqih c9055989e9 fix 2023-05-08 09:09:46 +08:00
lanvent 4f1ed197ce fix: compatible with python 3.7 2023-05-07 23:36:35 +08:00
Jianglang 3e710aa2a1 Merge pull request #1032 from wqh0109663/master
修复docker入口错误
2023-05-06 17:16:06 +08:00
wuqih b6226a45bb fix 2023-05-06 14:29:36 +08:00
lanvent 3001ba9266 fix: azure dalle generate image 2023-04-28 11:06:17 +08:00
lanvent b0a401a1ed fix(azure_dalle): use openai.api_base 2023-04-28 10:53:30 +08:00
Jianglang 6b4dc37428 Update README.md 2023-04-28 01:24:26 +08:00
lanvent 8528c9b262 feat(tool.py): add new configuration options for think_depth, arxiv_summary, and morning_news_use_llm 2023-04-28 00:24:07 +08:00
lanvent 7222a5c2f4 feat: add VERSION constant 2023-04-28 00:13:13 +08:00
lanvent 59050001ef Update README.md 2023-04-28 00:10:57 +08:00
lanvent 2ba8f18724 feat: add railway method for wechatcomapp 2023-04-28 00:04:55 +08:00
lanvent fb22e01b89 fix: send voice in wechatcomapp rightly 2023-04-27 23:04:24 +08:00
lanvent 76a81d5360 feat(wechatcomapp): add support for splitting long audio files 2023-04-27 22:47:50 +08:00
lanvent 3314b05648 feat: add support for azure dalle 2023-04-27 22:16:42 +08:00
lanvent 45b89218de fix: support set_openai_api_key for all channels 2023-04-27 20:43:12 +08:00
lanvent beb7bda243 fix(docker): use debian.latest as latest image 2023-04-27 19:45:51 +08:00
lanvent bef2896f50 add libavcodec-extra to Dockerfile 2023-04-27 15:09:24 +08:00
lanvent 9fea949b25 fix(azure_voice.py): log error details instead of cancellation details 2023-04-27 11:42:19 +08:00
lanvent be258e5b05 fix: add more log in itchat 2023-04-27 11:23:28 +08:00
lanvent 008178d737 fix(login.py): add error message when retry count is exceeded 2023-04-27 11:03:08 +08:00
lanvent 527d5e1dbc fix(itchat): add error log when hot reload fails and log out before logging in normally 2023-04-27 02:46:53 +08:00
lanvent 9b47e2d6f9 fix: output itchat error msg rightly 2023-04-26 22:54:53 +08:00
lanvent 8781b1e976 fix: role,dungeon,godcmd support azure bot 2023-04-26 01:05:23 +08:00
Jianglang 38c653d8d8 Merge pull request #957 from goldfishh/master
plugin(tool): 更新0.4.2
2023-04-26 00:53:07 +08:00
lanvent 74e48bb137 Update README.md 2023-04-26 00:49:40 +08:00
goldfishh c3aaa1f735 plugin(tool): 更新0.4.2 2023-04-26 00:48:54 +08:00
lanvent bead2aa228 fix: a typo in template 2023-04-26 00:23:08 +08:00
Jianglang dc52ab8aa9 Merge pull request #944 from zhayujie/wechatcom-app
添加企业微信应用号部署方式,支持插件,支持语音图片交互
2023-04-26 00:02:31 +08:00
lanvent 20b71f206b feat: add subscribe_msg option for wechatmp, wechatmp_service, and wechatcom_app channels 2023-04-26 00:01:04 +08:00
lanvent 73c87d5959 fix(wechatcomapp): split long text messages into multiple parts 2023-04-25 01:48:15 +08:00
lanvent c6601aaeed fix: ensure get access_token thread-safe 2023-04-25 01:11:50 +08:00
lanvent 6e14fce1fe docs: update README.md for wechatcom_app 2023-04-25 00:44:16 +08:00
lanvent be5a62f1b8 Merge Pull Request #936 into wechatcom-app 2023-04-24 22:41:42 +08:00
Jianglang 1fa8cefaea Add contact link in ISSUE_TEMPLATE 2023-04-24 16:38:19 +08:00
Jianglang d7c251ac83 Update README.md 2023-04-24 02:21:44 +08:00
lanvent d03229a183 Update ISSUE_TEMPLATE 2023-04-24 02:06:34 +08:00
lanvent 243482e829 Update ISSUE_TEMPLATE 2023-04-24 02:02:16 +08:00
lanvent 79d10be8a0 fix(wechatmp): add clear_quota_lock to ensure thread safe 2023-04-24 00:38:34 +08:00
JS00000 dca5c058e0 fix: Avoid the same filename under multithreading (#933) 2023-04-23 23:56:32 +08:00
lanvent 9163ce71fd fix: enable plugins for wechatcom_app 2023-04-23 16:51:16 +08:00
lanvent 2ec5374765 feat:modify wechatcom to wechatcom_app 2023-04-23 15:40:28 +08:00
lanvent d6a4b35cd3 chore: add numpy version constraint 2023-04-23 15:07:38 +08:00
lanvent 8205d2552c fix(Dockerfile): add extra-index-url to pip install command 2023-04-23 15:01:10 +08:00
lanvent 9a99caeb9d chore: add fetch_translate method to Bridge class 2023-04-23 05:12:50 +08:00
lanvent 1e09bd0e76 feat(azure_voice): add language detection, support mulitple languages 2023-04-23 04:28:46 +08:00
lanvent cae12eb187 feat: add baidu translate api 2023-04-23 03:54:16 +08:00
zhayujie 8bb36e0eb6 Merge pull request #926 from zhayujie/dev
docs: update README
2023-04-22 18:04:04 +08:00
zhayujie d183204caa docs: update README.md 2023-04-22 18:02:12 +08:00
zhayujie 4a22ae6b61 docs: update README.md 2023-04-22 17:53:43 +08:00
lanvent 665001732b feat: add image compression
Add image compression feature to WechatComAppChannel to compress images larger than 10MB before uploading to WeChat server. The compression is done using the `compress_imgfile` function in `utils.py`. The `fsize` function is also added to `utils.py` to calculate the size of a file or buffer.
2023-04-21 15:29:59 +08:00
lanvent 3e9e8d442a docs: add README.md for wechatcomapp channel 2023-04-20 08:43:17 +08:00
lanvent d2bf90c6c7 refactor: rename WechatComChannel to WechatComAppChannel 2023-04-20 08:31:42 +08:00
lanvent 3ea8781381 feat(wechatcom): add support for sending image 2023-04-20 02:14:52 +08:00
lanvent ab83dacb76 feat(wechatcom): add support for sending voice messages 2023-04-20 01:46:23 +08:00
lanvent 4cbf46fd4d feat: add support for wechatcom channel 2023-04-20 01:03:04 +08:00
54 changed files with 946 additions and 233 deletions
-31
View File
@@ -1,31 +0,0 @@
### 前置确认
1. 网络能够访问openai接口
2. python 已安装:版本在 3.7 ~ 3.10 之间
3. `git pull` 拉取最新代码
4. 执行`pip3 install -r requirements.txt`,检查依赖是否满足
5. 拓展功能请执行`pip3 install -r requirements-optional.txt`,检查依赖是否满足
6. 在已有 issue 中未搜索到类似问题
7. [FAQS](https://github.com/zhayujie/chatgpt-on-wechat/wiki/FAQs) 中无类似问题
### 问题描述
> 简要说明、截图、复现步骤等,也可以是需求或想法
### 终端日志 (如有报错)
```
[在此处粘贴终端日志, 可在主目录下`run.log`文件中找到]
```
### 环境
- 操作系统类型 (Mac/Windows/Linux)
- Python版本 ( 执行 `python3 -V` )
- pip版本 ( 依赖问题此项必填,执行 `pip3 -V`)
+133
View File
@@ -0,0 +1,133 @@
name: Bug report 🐛
description: 项目运行中遇到的Bug或问题。
labels: ['status: needs check']
body:
- type: markdown
attributes:
value: |
### ⚠️ 前置确认
1. 网络能够访问openai接口
2. python 已安装:版本在 3.7 ~ 3.10 之间
3. `git pull` 拉取最新代码
4. 执行`pip3 install -r requirements.txt`,检查依赖是否满足
5. 拓展功能请执行`pip3 install -r requirements-optional.txt`,检查依赖是否满足
6. [FAQS](https://github.com/zhayujie/chatgpt-on-wechat/wiki/FAQs) 中无类似问题
- type: checkboxes
attributes:
label: 前置确认
options:
- label: 我确认我运行的是最新版本的代码,并且安装了所需的依赖,在[FAQS](https://github.com/zhayujie/chatgpt-on-wechat/wiki/FAQs)中也未找到类似问题。
required: true
- type: checkboxes
attributes:
label: ⚠️ 搜索issues中是否已存在类似问题
description: >
请在 [历史issue](https://github.com/zhayujie/chatgpt-on-wechat/issues) 中清空输入框,搜索你的问题
或相关日志的关键词来查找是否存在类似问题。
options:
- label: 我已经搜索过issues和discussions,没有跟我遇到的问题相关的issue
required: true
- type: markdown
attributes:
value: |
请在上方的`title`中填写你对你所遇到问题的简略总结,这将帮助其他人更好的找到相似问题,谢谢❤️。
- type: dropdown
attributes:
label: 操作系统类型?
description: >
请选择你运行程序的操作系统类型。
options:
- Windows
- Linux
- MacOS
- Docker
- Railway
- Windows Subsystem for Linux (WSL)
- Other (请在问题中说明)
validations:
required: true
- type: dropdown
attributes:
label: 运行的python版本是?
description: |
请选择你运行程序的`python`版本。
注意:在`python 3.7`中,有部分可选依赖无法安装。
经过长时间的观察,我们认为`python 3.8`是兼容性最好的版本。
`python 3.7`~`python 3.10`以外版本的issue,将视情况直接关闭。
options:
- python 3.7
- python 3.8
- python 3.9
- python 3.10
- other
validations:
required: true
- type: dropdown
attributes:
label: 使用的chatgpt-on-wechat版本是?
description: |
请确保你使用的是 [releases](https://github.com/zhayujie/chatgpt-on-wechat/releases) 中的最新版本。
如果你使用git, 请使用`git branch`命令来查看分支。
options:
- Latest Release
- Master (branch)
validations:
required: true
- type: dropdown
attributes:
label: 运行的`channel`类型是?
description: |
请确保你正确配置了该`channel`所需的配置项,所有可选的配置项都写在了[该文件中](https://github.com/zhayujie/chatgpt-on-wechat/blob/master/config.py),请将所需配置项填写在根目录下的`config.json`文件中。
options:
- wx(个人微信, itchat)
- wxy(个人微信, wechaty)
- wechatmp(公众号, 订阅号)
- wechatmp_service(公众号, 服务号)
- terminal
- other
validations:
required: true
- type: textarea
attributes:
label: 复现步骤 🕹
description: |
**⚠️ 不能复现将会关闭issue.**
- type: textarea
attributes:
label: 问题描述 😯
description: 详细描述出现的问题,或提供有关截图。
- type: textarea
attributes:
label: 终端日志 📒
description: |
在此处粘贴终端日志,可在主目录下`run.log`文件中找到,这会帮助我们更好的分析问题,注意隐去你的API key。
如果在配置文件中加入`"debug": true`,打印出的日志会更有帮助。
<details>
<summary><i>示例</i></summary>
```log
[DEBUG][2023-04-16 00:23:22][plugin_manager.py:157] - Plugin SUMMARY triggered by event Event.ON_HANDLE_CONTEXT
[DEBUG][2023-04-16 00:23:22][main.py:221] - [Summary] on_handle_context. content: $总结前100条消息
[DEBUG][2023-04-16 00:23:24][main.py:240] - [Summary] limit: 100, duration: -1 seconds
[ERROR][2023-04-16 00:23:24][chat_channel.py:244] - Worker return exception: name 'start_date' is not defined
Traceback (most recent call last):
File "C:\ProgramData\Anaconda3\lib\concurrent\futures\thread.py", line 57, in run
result = self.fn(*self.args, **self.kwargs)
File "D:\project\chatgpt-on-wechat\channel\chat_channel.py", line 132, in _handle
reply = self._generate_reply(context)
File "D:\project\chatgpt-on-wechat\channel\chat_channel.py", line 142, in _generate_reply
e_context = PluginManager().emit_event(EventContext(Event.ON_HANDLE_CONTEXT, {
File "D:\project\chatgpt-on-wechat\plugins\plugin_manager.py", line 159, in emit_event
instance.handlers[e_context.event](e_context, *args, **kwargs)
File "D:\project\chatgpt-on-wechat\plugins\summary\main.py", line 255, in on_handle_context
records = self._get_records(session_id, start_time, limit)
File "D:\project\chatgpt-on-wechat\plugins\summary\main.py", line 96, in _get_records
c.execute("SELECT * FROM chat_records WHERE sessionid=? and timestamp>? ORDER BY timestamp DESC LIMIT ?", (session_id, start_date, limit))
NameError: name 'start_date' is not defined
[INFO][2023-04-16 00:23:36][app.py:14] - signal 2 received, exiting...
```
</details>
value: |
```log
<此处粘贴终端日志>
```
+28
View File
@@ -0,0 +1,28 @@
name: Feature request 🚀
description: 提出你对项目的新想法或建议。
labels: ['status: needs check']
body:
- type: markdown
attributes:
value: |
请在上方的`title`中填写简略总结,谢谢❤️。
- type: checkboxes
attributes:
label: ⚠️ 搜索是否存在类似issue
description: >
请在 [历史issue](https://github.com/zhayujie/chatgpt-on-wechat/issues) 中清空输入框,搜索关键词查找是否存在相似issue。
options:
- label: 我已经搜索过issues和discussions,没有发现相似issue
required: true
- type: textarea
attributes:
label: 总结
description: 描述feature的功能。
- type: textarea
attributes:
label: 举例
description: 提供聊天示例,草图或相关网址。
- type: textarea
attributes:
label: 动机
description: 描述你提出该feature的动机,比如没有这项feature对你的使用造成了怎样的影响。 请提供更详细的场景描述,这可能会帮助我们发现并提出更好的解决方案。
+6
View File
@@ -0,0 +1,6 @@
blank_issues_enabled: false
contact_links:
- name: 知识星球
url: https://public.zsxq.com/groups/88885848842852.html
about: 如果你想了解更多项目细节,并与开发者们交流更多关于AI技术的实践,欢迎加入星球
+31 -42
View File
@@ -2,27 +2,36 @@
> ChatGPT近期以强大的对话和信息整合能力风靡全网,可以写代码、改论文、讲故事,几乎无所不能,这让人不禁有个大胆的想法,能否用他的对话模型把我们的微信打造成一个智能机器人,可以在与好友对话中给出意想不到的回应,而且再也不用担心女朋友影响我们 ~~打游戏~~ 工作了。
最新版本支持的功能如下:
基于ChatGPT的微信聊天机器人,通过 [ChatGPT](https://github.com/openai/openai-python) 接口生成对话内容,使用 [itchat](https://github.com/littlecodersh/ItChat) 实现微信消息的接收和自动回复。已实现的特性如下:
- [x] **文本对话** 接收私聊及群组中的微信消息,使用ChatGPT生成回复内容,完成自动回复
- [x] **规则定制化** 支持私聊中按指定规则触发自动回复,支持对群组设置自动回复白名单
- [x] **图片生成** 支持根据描述生成图片,支持图片修复
- [x] **上下文记忆**:支持多轮对话记忆,且为每个好友维护独立的上下会话
- [x] **语音识别:** 支持接收和处理语音消息,通过文字或语音回复
- [x] **插件化:** 支持个性化插件,提供角色扮演、文字冒险、与操作系统交互、访问网络数据等能力
> 目前支持微信和微信公众号部署,欢迎接入更多应用,参考 [Terminal代码](https://github.com/zhayujie/chatgpt-on-wechat/blob/master/channel/terminal/terminal_channel.py)实现接收和发送消息逻辑即可接入。 同时欢迎增加新的插件,参考 [插件说明文档](https://github.com/zhayujie/chatgpt-on-wechat/tree/master/plugins)。
- [x] **多端部署:** 有多种部署方式可选择且功能完备,目前已支持个人微信,微信公众号和企业微信应用等部署方式
- [x] **基础对话:** 私聊及群聊的消息智能回复,支持多轮会话上下文记忆,支持 GPT-3,GPT-3.5GPT-4模型
- [x] **语音识别** 可识别语音消息,通过文字或语音回复,支持 azure, baidu, google, openai等多种语音模型
- [x] **图片生成** 支持图片生成 和 图生图(如照片修复),可选择 DALL-E, stable diffusion, replicate模型
- [x] **丰富插件** 支持个性化插件扩展,已实现多角色切换、文字冒险、敏感词过滤、聊天记录总结等插件
- [X] **Tool工具:** 与操作系统和互联网交互,支持最新信息搜索、数学计算、天气和资讯查询、网页总结,基于 [chatgpt-tool-hub](https://github.com/goldfishh/chatgpt-tool-hub) 实现
> 欢迎接入更多应用,参考 [Terminal代码](https://github.com/zhayujie/chatgpt-on-wechat/blob/master/channel/terminal/terminal_channel.py)实现接收和发送消息逻辑即可接入。 同时欢迎增加新的插件,参考 [插件说明文档](https://github.com/zhayujie/chatgpt-on-wechat/tree/master/plugins)。
**一键部署:**
- 个人微信
[![Deploy on Railway](https://railway.app/button.svg)](https://railway.app/template/qApznZ?referralCode=RC3znh)
- 企业微信应用号
[![Deploy on Railway](https://railway.app/button.svg)](https://railway.app/template/-FHS--?referralCode=RC3znh)
[![Deploy on Railway](https://railway.app/button.svg)](https://railway.app/template/qApznZ?referralCode=RC3znh)
# 演示
https://user-images.githubusercontent.com/26161723/233777277-e3b9928e-b88f-43e2-b0e0-3cbc923bc799.mp4
Demo made by [Visionn](https://www.wangpc.cc/)
# 更新日志
>**2023.04.05** 支持微信公众号部署,兼容角色扮演等预设插件[使用文档](https://github.com/zhayujie/chatgpt-on-wechat/blob/master/channel/wechatmp/README.md)。(contributed by [@JS00000](https://github.com/JS00000) in [#686](https://github.com/zhayujie/chatgpt-on-wechat/pull/686))
>**2023.04.26** 支持企业微信应用号部署,兼容插件,并支持语音图片交互,支持Railway部署[使用文档](https://github.com/zhayujie/chatgpt-on-wechat/blob/master/channel/wechatcom/README.md)。(contributed by [@lanvent](https://github.com/lanvent) in [#944](https://github.com/zhayujie/chatgpt-on-wechat/pull/944))
>**2023.04.05** 支持微信公众号部署,兼容插件,并支持语音图片交互,[使用文档](https://github.com/zhayujie/chatgpt-on-wechat/blob/master/channel/wechatmp/README.md)。(contributed by [@JS00000](https://github.com/JS00000) in [#686](https://github.com/zhayujie/chatgpt-on-wechat/pull/686))
>**2023.04.05** 增加能让ChatGPT使用工具的`tool`插件,[使用文档](https://github.com/goldfishh/chatgpt-on-wechat/blob/master/plugins/tool/README.md)。工具相关issue可反馈至[chatgpt-tool-hub](https://github.com/goldfishh/chatgpt-tool-hub)。(contributed by [@goldfishh](https://github.com/goldfishh) in [#663](https://github.com/zhayujie/chatgpt-on-wechat/pull/663))
@@ -32,28 +41,7 @@
>**2023.03.02** 接入[ChatGPT API](https://platform.openai.com/docs/guides/chat) (gpt-3.5-turbo),默认使用该模型进行对话,需升级openai依赖 (`pip3 install --upgrade openai`)。网络问题参考 [#351](https://github.com/zhayujie/chatgpt-on-wechat/issues/351)
>**2023.02.09** 扫码登录存在封号风险,请谨慎使用,参考[#58](https://github.com/AutumnWhj/ChatGPT-wechat-bot/issues/158)
>**2023.02.05** 在openai官方接口方案中 (GPT-3模型) 实现上下文对话
>**2022.12.18** 支持根据描述生成图片并发送,openai版本需大于0.25.0
>**2022.12.17** 原来的方案是从 [ChatGPT页面](https://chat.openai.com/chat) 获取session_token,使用 [revChatGPT](https://github.com/acheong08/ChatGPT) 直接访问web接口,但随着ChatGPT接入Cloudflare人机验证,这一方案难以在服务器顺利运行。 所以目前使用的方案是调用 OpenAI 官方提供的 [API](https://beta.openai.com/docs/api-reference/introduction),回复质量上基本接近于ChatGPT的内容,劣势是暂不支持有上下文记忆的对话,优势是稳定性和响应速度较好。
# 使用效果
### 个人聊天
![single-chat-sample.jpg](docs/images/single-chat-sample.jpg)
### 群组聊天
![group-chat-sample.jpg](docs/images/group-chat-sample.jpg)
### 图片生成
![group-chat-sample.jpg](docs/images/image-create-sample.jpg)
>**2023.02.09** 扫码登录存在账号限制风险,请谨慎使用,参考[#58](https://github.com/AutumnWhj/ChatGPT-wechat-bot/issues/158)
# 快速开始
@@ -63,7 +51,7 @@
前往 [OpenAI注册页面](https://beta.openai.com/signup) 创建账号,参考这篇 [教程](https://www.pythonthree.com/register-openai-chatgpt/) 可以通过虚拟手机号来接收验证码。创建完账号则前往 [API管理页面](https://beta.openai.com/account/api-keys) 创建一个 API Key 并保存下来,后面需要在项目中配置这个key。
> 项目中使用的对话模型是 davinci,计费方式是约每 750 字 (包含请求和回复) 消耗 $0.02,图片生成是每张消耗 $0.016,账号创建有免费的 $18 额度 (更新3.25: 最新注册的已经无免费额度了),使用完可以更换邮箱重新注册
> 项目中默认使用的对话模型是 gpt3.5 turbo,计费方式是约每 500 字 (包含请求和回复) 消耗 $0.002,图片生成是每张消耗 $0.016。
### 2.运行环境
@@ -99,15 +87,13 @@ pip3 install -r requirements-optional.txt
参考[#415](https://github.com/zhayujie/chatgpt-on-wechat/issues/415)
使用`azure`语音功能需安装依赖(列在`requirements-optional.txt`内,但为便于`railway`部署已注释):
使用`azure`语音功能需安装依赖,并参考[文档](https://learn.microsoft.com/en-us/azure/cognitive-services/speech-service/quickstarts/setup-platform?pivots=programming-language-python&tabs=linux%2Cubuntu%2Cdotnet%2Cjre%2Cmaven%2Cnodejs%2Cmac%2Cpypi)的环境要求。
:
```bash
pip3 install azure-cognitiveservices-speech
```
> 目前默认发布的镜像和`railway`部署,都基于`alpine`,无法安装`azure`的依赖。若有需求请自行基于[`debian`](https://github.com/zhayujie/chatgpt-on-wechat/blob/master/docker/Dockerfile.debian.latest)打包。
参考[文档](https://learn.microsoft.com/en-us/azure/cognitive-services/speech-service/quickstarts/setup-platform?pivots=programming-language-python&tabs=linux%2Cubuntu%2Cdotnet%2Cjre%2Cmaven%2Cnodejs%2Cmac%2Cpypi)
## 配置
配置文件的模板在根目录的`config-template.json`中,需复制该模板创建最终生效的 `config.json` 文件:
@@ -116,7 +102,7 @@ pip3 install azure-cognitiveservices-speech
cp config-template.json config.json
```
然后在`config.json`中填入配置,以下是对默认配置的说明,可根据需要进行自定义修改:
然后在`config.json`中填入配置,以下是对默认配置的说明,可根据需要进行自定义修改(请去掉注释)
```bash
# config.json文件内容示例
@@ -134,7 +120,9 @@ pip3 install azure-cognitiveservices-speech
"speech_recognition": false, # 是否开启语音识别
"group_speech_recognition": false, # 是否开启群组语音识别
"use_azure_chatgpt": false, # 是否使用Azure ChatGPT service代替openai ChatGPT service. 当设置为true时需要设置 open_ai_api_base,如 https://xxx.openai.azure.com/
"character_desc": "你是ChatGPT, 一个由OpenAI训练的大型语言模型, 你旨在回答并解决人们的任何问题,并且可以使用多种语言与人交流。", # 人格描述,
"character_desc": "你是ChatGPT, 一个由OpenAI训练的大型语言模型, 你旨在回答并解决人们的任何问题,并且可以使用多种语言与人交流。", # 人格描述
# 订阅消息,公众号和企业微信channel中请填写,当被订阅时会自动回复,可使用特殊占位符。目前支持的占位符有{trigger_prefix},在程序中它会自动替换成bot的触发词。
"subscribe_msg": "感谢您的关注!\n这里是ChatGPT,可以自由对话。\n支持语音对话。\n支持图片输出,画字开头的消息将按要求创作图片。\n支持角色扮演和文字冒险等丰富插件。\n输入{trigger_prefix}#help 查看详细指令。"
}
```
**配置说明:**
@@ -169,6 +157,7 @@ pip3 install azure-cognitiveservices-speech
+ `clear_memory_commands`: 对话内指令,主动清空前文记忆,字符串数组可自定义指令别名。
+ `hot_reload`: 程序退出后,暂存微信扫码状态,默认关闭。
+ `character_desc` 配置中保存着你对机器人说的一段话,他会记住这段话并作为他的设定,你可以为他定制任何人格 (关于会话上下文的更多内容参考该 [issue](https://github.com/zhayujie/chatgpt-on-wechat/issues/43))
+ `subscribe_msg`:订阅消息,公众号和企业微信channel中请填写,当被订阅时会自动回复, 可使用特殊占位符。目前支持的占位符有{trigger_prefix},在程序中它会自动替换成bot的触发词。
**所有可选的配置项均在该[文件](https://github.com/zhayujie/chatgpt-on-wechat/blob/master/config.py)中列出。**
@@ -203,7 +192,7 @@ nohup python3 app.py & tail -f nohup.out # 在后台运行程序并通
参考文档 [Docker部署](https://github.com/limccn/chatgpt-on-wechat/wiki/Docker%E9%83%A8%E7%BD%B2) (Contributed by [limccn](https://github.com/limccn))。
### 4. Railway部署(✅推荐)
### 4. Railway部署 (✅推荐)
> Railway每月提供5刀和最多500小时的免费额度。
1. 进入 [Railway](https://railway.app/template/qApznZ?referralCode=RC3znh)。
2. 点击 `Deploy Now` 按钮。
+1 -1
View File
@@ -43,7 +43,7 @@ def run():
# os.environ['WECHATY_PUPPET_SERVICE_ENDPOINT'] = '127.0.0.1:9001'
channel = channel_factory.create_channel(channel_name)
if channel_name in ["wx", "wxy", "terminal", "wechatmp", "wechatmp_service"]:
if channel_name in ["wx", "wxy", "terminal", "wechatmp", "wechatmp_service", "wechatcom_app"]:
PluginManager().load_plugins()
# startup channel
+24
View File
@@ -4,6 +4,7 @@ import time
import openai
import openai.error
import requests
from bot.bot import Bot
from bot.chatgpt.chat_gpt_session import ChatGPTSession
@@ -155,3 +156,26 @@ class AzureChatGPTBot(ChatGPTBot):
openai.api_type = "azure"
openai.api_version = "2023-03-15-preview"
self.args["deployment_id"] = conf().get("azure_deployment_id")
def create_img(self, query, retry_count=0, api_key=None):
api_version = "2022-08-03-preview"
url = "{}dalle/text-to-image?api-version={}".format(openai.api_base, api_version)
api_key = api_key or openai.api_key
headers = {"api-key": api_key, "Content-Type": "application/json"}
try:
body = {"caption": query, "resolution": conf().get("image_create_size", "256x256")}
submission = requests.post(url, headers=headers, json=body)
operation_location = submission.headers["Operation-Location"]
retry_after = submission.headers["Retry-after"]
status = ""
image_url = ""
while status != "Succeeded":
logger.info("waiting for image create..., " + status + ",retry after " + retry_after + " seconds")
time.sleep(int(retry_after))
response = requests.get(operation_location, headers=headers)
status = response.json()["status"]
image_url = response.json()["result"]["contentUrl"]
return True, image_url
except Exception as e:
logger.error("create image error: {}".format(e))
return False, "图片生成失败"
+2 -1
View File
@@ -15,12 +15,13 @@ class OpenAIImage(object):
if conf().get("rate_limit_dalle"):
self.tb4dalle = TokenBucket(conf().get("rate_limit_dalle", 50))
def create_img(self, query, retry_count=0):
def create_img(self, query, retry_count=0, api_key=None):
try:
if conf().get("rate_limit_dalle") and not self.tb4dalle.get_token():
return False, "请求太快了,请休息一下再问我吧"
logger.info("[OPEN_AI] image_query={}".format(query))
response = openai.Image.create(
api_key=api_key,
prompt=query, # 图片描述
n=1, # 每次生成图片的数量
size=conf().get("image_create_size", "256x256"), # 图片大小,可选有 256x256, 512x512, 1024x1024
+12 -5
View File
@@ -1,11 +1,12 @@
from bot import bot_factory
from bot.bot_factory import create_bot
from bridge.context import Context
from bridge.reply import Reply
from common import const
from common.log import logger
from common.singleton import singleton
from config import conf
from voice import voice_factory
from translate.factory import create_translator
from voice.factory import create_voice
@singleton
@@ -15,6 +16,7 @@ class Bridge(object):
"chat": const.CHATGPT,
"voice_to_text": conf().get("voice_to_text", "openai"),
"text_to_voice": conf().get("text_to_voice", "google"),
"translate": conf().get("translate", "baidu"),
}
model_type = conf().get("model")
if model_type in ["text-davinci-003"]:
@@ -27,11 +29,13 @@ class Bridge(object):
if self.bots.get(typename) is None:
logger.info("create bot {} for {}".format(self.btype[typename], typename))
if typename == "text_to_voice":
self.bots[typename] = voice_factory.create_voice(self.btype[typename])
self.bots[typename] = create_voice(self.btype[typename])
elif typename == "voice_to_text":
self.bots[typename] = voice_factory.create_voice(self.btype[typename])
self.bots[typename] = create_voice(self.btype[typename])
elif typename == "chat":
self.bots[typename] = bot_factory.create_bot(self.btype[typename])
self.bots[typename] = create_bot(self.btype[typename])
elif typename == "translate":
self.bots[typename] = create_translator(self.btype[typename])
return self.bots[typename]
def get_bot_type(self, typename):
@@ -45,3 +49,6 @@ class Bridge(object):
def fetch_text_to_voice(self, text) -> Reply:
return self.get_bot("text_to_voice").textToVoice(text)
def fetch_translate(self, text, from_lang="", to_lang="en") -> Reply:
return self.get_bot("translate").translate(text, from_lang, to_lang)
+4
View File
@@ -29,4 +29,8 @@ def create_channel(channel_type):
from channel.wechatmp.wechatmp_channel import WechatMPChannel
return WechatMPChannel(passive_reply=False)
elif channel_type == "wechatcom_app":
from channel.wechatcom.wechatcomapp_channel import WechatComAppChannel
return WechatComAppChannel()
raise RuntimeError
+3 -1
View File
@@ -48,6 +48,8 @@ class ChatChannel(Channel):
if first_in: # context首次传入时,receiver是None,根据类型设置receiver
config = conf()
cmsg = context["msg"]
user_data = conf().get_user_data(cmsg.from_user_id)
context["openai_api_key"] = user_data.get("openai_api_key")
if context.get("isgroup", False):
group_name = cmsg.other_user_nickname
group_id = cmsg.other_user_id
@@ -119,7 +121,7 @@ class ChatChannel(Channel):
pass
else:
return None
content = content.strip()
img_match_prefix = check_prefix(content, conf().get("image_create_prefix"))
if img_match_prefix:
content = content.replace(img_match_prefix, "", 1)
+8 -17
View File
@@ -29,7 +29,7 @@ from plugins import *
@itchat.msg_register([TEXT, VOICE, PICTURE, NOTE])
def handler_single_msg(msg):
try:
cmsg = WeChatMessage(msg, False)
cmsg = WechatMessage(msg, False)
except NotImplementedError as e:
logger.debug("[WX]single message {} skipped: {}".format(msg["MsgId"], e))
return None
@@ -40,7 +40,7 @@ def handler_single_msg(msg):
@itchat.msg_register([TEXT, VOICE, PICTURE, NOTE], isGroupChat=True)
def handler_group_msg(msg):
try:
cmsg = WeChatMessage(msg, True)
cmsg = WechatMessage(msg, True)
except NotImplementedError as e:
logger.debug("[WX]group message {} skipped: {}".format(msg["MsgId"], e))
return None
@@ -113,21 +113,12 @@ class WechatChannel(ChatChannel):
# login by scan QRCode
hotReload = conf().get("hot_reload", False)
status_path = os.path.join(get_appdata_dir(), "itchat.pkl")
try:
itchat.auto_login(
enableCmdQR=2,
hotReload=hotReload,
statusStorageDir=status_path,
qrCallback=qrCallback,
)
except Exception as e:
if hotReload:
logger.error("Hot reload failed, try to login without hot reload")
itchat.logout()
os.remove(status_path)
itchat.auto_login(enableCmdQR=2, hotReload=hotReload, qrCallback=qrCallback)
else:
raise e
itchat.auto_login(
enableCmdQR=2,
hotReload=hotReload,
statusStorageDir=status_path,
qrCallback=qrCallback,
)
self.user_id = itchat.instance.storageClass.userName
self.name = itchat.instance.storageClass.nickName
logger.info("Wechat login success, user_id: {}, nickname: {}".format(self.user_id, self.name))
+1 -1
View File
@@ -8,7 +8,7 @@ from lib import itchat
from lib.itchat.content import *
class WeChatMessage(ChatMessage):
class WechatMessage(ChatMessage):
def __init__(self, itchat_msg, is_group=False):
super().__init__(itchat_msg)
self.msg_id = itchat_msg["MsgId"]
+87
View File
@@ -0,0 +1,87 @@
# 企业微信应用号channel
企业微信官方提供了客服、应用等API,本channel使用的是企业微信的应用API的能力。
因为未来可能还会开发客服能力,所以本channel的类型名叫作`wechatcom_app`
`wechatcom_app` channel支持插件系统和图片声音交互等能力,除了无法加入群聊,作为个人使用的私人助理已绰绰有余。
## 开始之前
- 在企业中确认自己拥有在企业内自建应用的权限。
- 如果没有权限或者是个人用户,也可创建未认证的企业。操作方式:登录手机企业微信,选择`创建/加入企业`来创建企业,类型请选择企业,企业名称可随意填写。
未认证的企业有100人的服务人数上限,其他功能与认证企业没有差异。
本channel需安装的依赖与公众号一致,需要安装`wechatpy``web.py`,它们包含在`requirements-optional.txt`中。
此外,如果你是`Linux`系统,除了`ffmpeg`还需要安装`amr`编码器,否则会出现找不到编码器的错误,无法正常使用语音功能。
- Ubuntu/Debian
```bash
apt-get install libavcodec-extra
```
- Alpine
需自行编译`ffmpeg`,在编译参数里加入`amr`编码器的支持
## 使用方法
1.查看企业ID
- 扫码登录[企业微信后台](https://work.weixin.qq.com)
- 选择`我的企业`,点击`企业信息`,记住该`企业ID`
2.创建自建应用
- 选择应用管理, 在自建区选创建应用来创建企业自建应用
- 上传应用logo,填写应用名称等项
- 创建应用后进入应用详情页面,记住`AgentId`和`Secret`
3.配置应用
- 在详情页点击`企业可信IP`的配置(没看到可以不管),填入你服务器的公网IP,如果不知道可以先不填
- 点击`接收消息`下的启用API接收消息
- `URL`填写格式为`http://url:port/wxcomapp``port`是程序监听的端口,默认是9898
如果是未认证的企业,url可直接使用服务器的IP。如果是认证企业,需要使用备案的域名,可使用二级域名。
- `Token`可随意填写,停留在这个页面
- 在程序根目录`config.json`中增加配置(**去掉注释**),其中`wechatcomapp_aes_key`填写当前页面的`EncodingAESKey`
```python
"channel_type": "wechatcom_app",
"wechatcom_corp_id": "", # 企业微信公司的corpID
"wechatcomapp_token": "", # 企业微信app的token
"wechatcomapp_port": 9898, # 企业微信app的服务端口, 不需要端口转发
"wechatcomapp_secret": "", # 企业微信app的secret
"wechatcomapp_agent_id": "", # 企业微信app的agent_id
"wechatcomapp_aes_key": "", # 企业微信app的aes_key
```
- 运行程序,在页面中点击保存,保存成功说明验证成功
4.连接个人微信
选择`我的企业`,点击`微信插件`,下面有个邀请关注的二维码。微信扫码后,即可在微信中看到对应企业,在这里你便可以和机器人沟通。
向机器人发送消息,如果日志里出现报错:
```bash
Error code: 60020, message: "not allow to access from your ip, ...from ip: xx.xx.xx.xx"
```
意思是IP不可信,需要参考上一步的`企业可信IP`配置,把这里的IP加进去。
### Railway部署方式
公众号不能在`Railway`上部署,但企业微信应用[可以](https://railway.app/template/-FHS--?referralCode=RC3znh)!
[![Deploy on Railway](https://railway.app/button.svg)](https://railway.app/template/-FHS--?referralCode=RC3znh)
填写配置后,将部署完成后的网址```**.railway.app/wxcomapp```,填写在上一步的URL中。发送信息后观察日志,把报错的IP加入到可信IP。(每次重启后都需要加入可信IP)
## 测试体验
AIGC开放社区中已经部署了多个可免费使用的Bot,扫描下方的二维码会自动邀请你来体验。
<img width="200" src="../../docs/images/aigcopen.png">
+178
View File
@@ -0,0 +1,178 @@
# -*- coding=utf-8 -*-
import io
import os
import time
import requests
import web
from wechatpy.enterprise import create_reply, parse_message
from wechatpy.enterprise.crypto import WeChatCrypto
from wechatpy.enterprise.exceptions import InvalidCorpIdException
from wechatpy.exceptions import InvalidSignatureException, WeChatClientException
from bridge.context import Context
from bridge.reply import Reply, ReplyType
from channel.chat_channel import ChatChannel
from channel.wechatcom.wechatcomapp_client import WechatComAppClient
from channel.wechatcom.wechatcomapp_message import WechatComAppMessage
from common.log import logger
from common.singleton import singleton
from common.utils import compress_imgfile, fsize, split_string_by_utf8_length
from config import conf, subscribe_msg
from voice.audio_convert import any_to_amr, split_audio
MAX_UTF8_LEN = 2048
@singleton
class WechatComAppChannel(ChatChannel):
    """Channel implementation for WeChat Work (企业微信) self-built apps.

    Receives callbacks through a web.py listener (see Query) and sends
    replies through the WeChat Work application message API.
    """

    # Empty list: this channel can deliver every ReplyType.
    NOT_SUPPORT_REPLYTYPE = []

    def __init__(self):
        super().__init__()
        self.corp_id = conf().get("wechatcom_corp_id")
        self.secret = conf().get("wechatcomapp_secret")
        self.agent_id = conf().get("wechatcomapp_agent_id")
        self.token = conf().get("wechatcomapp_token")
        self.aes_key = conf().get("wechatcomapp_aes_key")
        # fix: removed a leftover debug print() that dumped the secret and
        # AES key straight to stdout; the logger call below already records
        # the startup configuration.
        logger.info(
            "[wechatcom] init: corp_id: {}, secret: {}, agent_id: {}, token: {}, aes_key: {}".format(self.corp_id, self.secret, self.agent_id, self.token, self.aes_key)
        )
        self.crypto = WeChatCrypto(self.token, self.aes_key, self.corp_id)
        self.client = WechatComAppClient(self.corp_id, self.secret)

    def startup(self):
        """Start the blocking HTTP listener that handles WeCom callbacks."""
        urls = ("/wxcomapp", "channel.wechatcom.wechatcomapp_channel.Query")
        app = web.application(urls, globals(), autoreload=False)
        port = conf().get("wechatcomapp_port", 9898)
        web.httpserver.runsimple(app.wsgifunc(), ("0.0.0.0", port))

    def _upload_image(self, image_storage):
        """Compress a file-like image under WeCom's 10MB limit if needed,
        upload it, and return its media_id (None on upload failure)."""
        sz = fsize(image_storage)
        if sz >= 10 * 1024 * 1024:
            logger.info("[wechatcom] image too large, ready to compress, sz={}".format(sz))
            image_storage = compress_imgfile(image_storage, 10 * 1024 * 1024 - 1)
            logger.info("[wechatcom] image compressed, sz={}".format(fsize(image_storage)))
        image_storage.seek(0)
        try:
            response = self.client.media.upload("image", image_storage)
            logger.debug("[wechatcom] upload image response: {}".format(response))
        except WeChatClientException as e:
            logger.error("[wechatcom] upload image failed: {}".format(e))
            return None
        return response["media_id"]

    def send(self, reply: Reply, context: Context):
        """Deliver `reply` (text / voice / image) to context["receiver"]."""
        receiver = context["receiver"]
        if reply.type in [ReplyType.TEXT, ReplyType.ERROR, ReplyType.INFO]:
            reply_text = reply.content
            # WeCom caps a single text message; split long replies.
            texts = split_string_by_utf8_length(reply_text, MAX_UTF8_LEN)
            if len(texts) > 1:
                logger.info("[wechatcom] text too long, split into {} parts".format(len(texts)))
            for i, text in enumerate(texts):
                self.client.message.send_text(self.agent_id, receiver, text)
                if i != len(texts) - 1:
                    time.sleep(0.5)  # brief pause so parts arrive in order
            logger.info("[wechatcom] Do send text to {}: {}".format(receiver, reply_text))
        elif reply.type == ReplyType.VOICE:
            try:
                media_ids = []
                file_path = reply.content
                amr_file = os.path.splitext(file_path)[0] + ".amr"
                any_to_amr(file_path, amr_file)
                # WeCom voice messages are limited to 60s; split longer audio.
                duration, files = split_audio(amr_file, 60 * 1000)
                if len(files) > 1:
                    logger.info("[wechatcom] voice too long {}s > 60s , split into {} parts".format(duration / 1000.0, len(files)))
                for path in files:
                    # fix: close each audio file handle after upload (was a
                    # bare open() whose handle leaked).
                    with open(path, "rb") as f:
                        response = self.client.media.upload("voice", f)
                    logger.debug("[wechatcom] upload voice response: {}".format(response))
                    media_ids.append(response["media_id"])
            except WeChatClientException as e:
                logger.error("[wechatcom] upload voice failed: {}".format(e))
                return
            # Best-effort cleanup of temporary audio files.
            try:
                os.remove(file_path)
                if amr_file != file_path:
                    os.remove(amr_file)
            except Exception:
                pass
            for media_id in media_ids:
                self.client.message.send_voice(self.agent_id, receiver, media_id)
                time.sleep(1)
            logger.info("[wechatcom] sendVoice={}, receiver={}".format(reply.content, receiver))
        elif reply.type == ReplyType.IMAGE_URL:  # 从网络下载图片
            img_url = reply.content
            pic_res = requests.get(img_url, stream=True)
            image_storage = io.BytesIO()
            for block in pic_res.iter_content(1024):
                image_storage.write(block)
            media_id = self._upload_image(image_storage)
            if media_id is None:
                return
            self.client.message.send_image(self.agent_id, receiver, media_id)
            logger.info("[wechatcom] sendImage url={}, receiver={}".format(img_url, receiver))
        elif reply.type == ReplyType.IMAGE:  # 从文件读取图片
            media_id = self._upload_image(reply.content)
            if media_id is None:
                return
            self.client.message.send_image(self.agent_id, receiver, media_id)
            logger.info("[wechatcom] sendImage, receiver={}".format(receiver))
class Query:
    """web.py handler for WeChat Work (企业微信) self-built app callbacks."""

    def GET(self):
        """Echo-string verification endpoint used during server-URL setup.

        Returns the decrypted echostr on success; raises web.Forbidden when
        the signature does not verify.
        """
        channel = WechatComAppChannel()
        params = web.input()
        logger.info("[wechatcom] receive params: {}".format(params))
        try:
            signature = params.msg_signature
            timestamp = params.timestamp
            nonce = params.nonce
            echostr = params.echostr
            echostr = channel.crypto.check_signature(signature, timestamp, nonce, echostr)
        except InvalidSignatureException:
            raise web.Forbidden()
        return echostr

    def POST(self):
        """Message callback: decrypt, parse, and dispatch incoming messages."""
        channel = WechatComAppChannel()
        params = web.input()
        logger.info("[wechatcom] receive params: {}".format(params))
        try:
            signature = params.msg_signature
            timestamp = params.timestamp
            nonce = params.nonce
            message = channel.crypto.decrypt_message(web.data(), signature, timestamp, nonce)
        except (InvalidSignatureException, InvalidCorpIdException):
            raise web.Forbidden()
        msg = parse_message(message)
        logger.debug("[wechatcom] receive message: {}, msg= {}".format(message, msg))
        if msg.type == "event":
            if msg.event == "subscribe":
                reply_content = subscribe_msg()
                if reply_content:
                    reply = create_reply(reply_content, msg).render()
                    res = channel.crypto.encrypt_message(reply, nonce, timestamp)
                    return res
            # fix: previously fell through and returned None (empty body) for
            # events we don't answer, causing the WeChat server to retry the
            # callback; always acknowledge with "success".
            return "success"
        else:
            try:
                wechatcom_msg = WechatComAppMessage(msg, client=channel.client)
            except NotImplementedError as e:
                logger.debug("[wechatcom] " + str(e))
                return "success"
            context = channel._compose_context(
                wechatcom_msg.ctype,
                wechatcom_msg.content,
                isgroup=False,
                msg=wechatcom_msg,
            )
            if context:
                channel.produce(context)
            return "success"
+21
View File
@@ -0,0 +1,21 @@
import threading
import time
from wechatpy.enterprise import WeChatClient
class WechatComAppClient(WeChatClient):
    """WeChatClient subclass that serializes access-token refreshes."""

    def __init__(self, corp_id, secret, access_token=None, session=None, timeout=None, auto_retry=True):
        super(WechatComAppClient, self).__init__(corp_id, secret, access_token, session, timeout, auto_retry)
        # Guards fetch_access_token so concurrent threads do not all hit the
        # token endpoint at once.
        self.fetch_access_token_lock = threading.Lock()

    def fetch_access_token(self):
        """Overridden to take the lock and reuse a still-valid cached token."""
        with self.fetch_access_token_lock:
            cached = self.session.get(self.access_token_key)
            # Reuse the cached token when no expiry is recorded, or when more
            # than 60 seconds of validity remain.
            if cached and (not self.expires_at or self.expires_at - time.time() > 60):
                return cached
            return super().fetch_access_token()
+52
View File
@@ -0,0 +1,52 @@
from wechatpy.enterprise import WeChatClient
from bridge.context import ContextType
from channel.chat_message import ChatMessage
from common.log import logger
from common.tmp_dir import TmpDir
class WechatComAppMessage(ChatMessage):
    """ChatMessage adapter for WeChat Work (企业微信) self-built app messages.

    Text content is stored inline; voice/image content is a temp-dir file
    path whose bytes are fetched lazily through ``_prepare_fn``.
    """

    def __init__(self, msg, client: WeChatClient, is_group=False):
        super().__init__(msg)
        self.msg_id = msg.id
        self.create_time = msg.time
        self.is_group = is_group

        def make_downloader(fail_log):
            # Build a lazy downloader that writes the media bytes to
            # self.content the first time the message is actually handled.
            def _download():
                response = client.media.download(msg.media_id)
                if response.status_code == 200:
                    with open(self.content, "wb") as f:
                        f.write(response.content)
                else:
                    logger.info(fail_log.format(response.content))

            return _download

        if msg.type == "text":
            self.ctype = ContextType.TEXT
            self.content = msg.content
        elif msg.type == "voice":
            self.ctype = ContextType.VOICE
            # content holds a path in the temp dir; the audio is downloaded on demand
            self.content = TmpDir().path() + msg.media_id + "." + msg.format
            self._prepare_fn = make_downloader("[wechatcom] Failed to download voice file, {}")
        elif msg.type == "image":
            self.ctype = ContextType.IMAGE
            # content holds a path in the temp dir; the image is downloaded on demand
            self.content = TmpDir().path() + msg.media_id + ".png"
            self._prepare_fn = make_downloader("[wechatcom] Failed to download image file, {}")
        else:
            raise NotImplementedError("Unsupported message type: Type:{} ".format(msg.type))

        self.from_user_id = msg.source
        self.to_user_id = msg.target
        self.other_user_id = msg.source
+1 -1
View File
@@ -16,7 +16,7 @@ pip3 install wechatpy
然后在[微信公众平台](https://mp.weixin.qq.com)注册一个自己的公众号,类型选择订阅号,主体为个人即可。
然后根据[接入指南](https://developers.weixin.qq.com/doc/offiaccount/Basic_Information/Access_Overview.html)的说明,在[微信公众平台](https://mp.weixin.qq.com)的“设置与开发”-“基本配置”-“服务器配置”中填写服务器地址`URL`和令牌`Token`这里的`URL``example.com/wx`的形式,不可以使用IP`Token`是你自己编的一个特定的令牌。消息加解密方式如果选择了需要加密的模式,需要在配置中填写`wechatmp_aes_key`
然后根据[接入指南](https://developers.weixin.qq.com/doc/offiaccount/Basic_Information/Access_Overview.html)的说明,在[微信公众平台](https://mp.weixin.qq.com)的“设置与开发”-“基本配置”-“服务器配置”中填写服务器地址`URL`和令牌`Token``URL`填写格式为`http://url/wx`,可使用IP(成功几率看脸)`Token`是你自己编的一个特定的令牌。消息加解密方式如果选择了需要加密的模式,需要在配置中填写`wechatmp_aes_key`
相关的服务器验证代码已经写好,你不需要再添加任何代码。你只需要在本项目根目录的`config.json`中添加
```
+5 -8
View File
@@ -10,7 +10,7 @@ from channel.wechatmp.common import *
from channel.wechatmp.wechatmp_channel import WechatMPChannel
from channel.wechatmp.wechatmp_message import WeChatMPMessage
from common.log import logger
from config import conf
from config import conf, subscribe_msg
# This class is instantiated once per query
@@ -55,10 +55,6 @@ class Query:
else:
context = channel._compose_context(wechatmp_msg.ctype, content, isgroup=False, msg=wechatmp_msg)
if context:
# set private openai_api_key
# if from_user is not changed in itchat, this can be placed at chat_channel
user_data = conf().get_user_data(from_user)
context["openai_api_key"] = user_data.get("openai_api_key") # None or user openai_api_key
channel.produce(context)
# The reply will be sent by channel.send() in another thread
return "success"
@@ -66,13 +62,14 @@ class Query:
logger.info("[wechatmp] Event {} from {}".format(msg.event, msg.source))
if msg.event in ["subscribe", "subscribe_scan"]:
reply_text = subscribe_msg()
replyPost = create_reply(reply_text, msg)
return encrypt_func(replyPost.render())
if reply_text:
replyPost = create_reply(reply_text, msg)
return encrypt_func(replyPost.render())
else:
return "success"
else:
logger.info("暂且不处理")
return "success"
return "success"
except Exception as exc:
logger.exception(exc)
return exc
-35
View File
@@ -1,5 +1,3 @@
import textwrap
import web
from wechatpy.crypto import WeChatCrypto
from wechatpy.exceptions import InvalidSignatureException
@@ -27,36 +25,3 @@ def verify_server(data):
raise web.Forbidden("Invalid signature")
except Exception as e:
raise web.Forbidden(str(e))
def subscribe_msg():
trigger_prefix = conf().get("single_chat_prefix", [""])[0]
msg = textwrap.dedent(
f"""\
感谢您的关注!
这里是ChatGPT,可以自由对话。
资源有限,回复较慢,请勿着急。
支持语音对话。
支持图片输入。
支持图片输出,画字开头的消息将按要求创作图片。
支持tool、角色扮演和文字冒险等丰富的插件。
输入'{trigger_prefix}#帮助' 查看详细指令。"""
)
return msg
def split_string_by_utf8_length(string, max_length, max_split=0):
encoded = string.encode("utf-8")
start, end = 0, 0
result = []
while end < len(encoded):
if max_split > 0 and len(result) >= max_split:
result.append(encoded[start:].decode("utf-8"))
break
end = min(start + max_length, len(encoded))
# 如果当前字节不是 UTF-8 编码的开始字节,则向前查找直到找到开始字节为止
while end < len(encoded) and (encoded[end] & 0b11000000) == 0b10000000:
end -= 1
result.append(encoded[start:end].decode("utf-8"))
start = end
return result
+7 -10
View File
@@ -4,14 +4,15 @@ import time
import web
from wechatpy import parse_message
from wechatpy.replies import ImageReply, VoiceReply, create_reply
import textwrap
from bridge.context import *
from bridge.reply import *
from channel.wechatmp.common import *
from channel.wechatmp.wechatmp_channel import WechatMPChannel
from channel.wechatmp.wechatmp_message import WeChatMPMessage
from common.log import logger
from config import conf
from common.utils import split_string_by_utf8_length
from config import conf, subscribe_msg
# This class is instantiated once per query
@@ -61,10 +62,6 @@ class Query:
logger.debug("[wechatmp] context: {} {} {}".format(context, wechatmp_msg, supported))
if supported and context:
# set private openai_api_key
# if from_user is not changed in itchat, this can be placed at chat_channel
user_data = conf().get_user_data(from_user)
context["openai_api_key"] = user_data.get("openai_api_key")
channel.running.add(from_user)
channel.produce(context)
else:
@@ -199,14 +196,14 @@ class Query:
logger.info("[wechatmp] Event {} from {}".format(msg.event, msg.source))
if msg.event in ["subscribe", "subscribe_scan"]:
reply_text = subscribe_msg()
replyPost = create_reply(reply_text, msg)
return encrypt_func(replyPost.render())
if reply_text:
replyPost = create_reply(reply_text, msg)
return encrypt_func(replyPost.render())
else:
return "success"
else:
logger.info("暂且不处理")
return "success"
return "success"
except Exception as exc:
logger.exception(exc)
return exc
+4 -1
View File
@@ -18,6 +18,7 @@ from channel.wechatmp.common import *
from channel.wechatmp.wechatmp_client import WechatMPClient
from common.log import logger
from common.singleton import singleton
from common.utils import split_string_by_utf8_length
from config import conf
from voice.audio_convert import any_to_mp3
@@ -140,8 +141,10 @@ class WechatMPChannel(ChatChannel):
texts = split_string_by_utf8_length(reply_text, MAX_UTF8_LEN)
if len(texts) > 1:
logger.info("[wechatmp] text too long, split into {} parts".format(len(texts)))
for text in texts:
for i, text in enumerate(texts):
self.client.message.send_text(receiver, text)
if i != len(texts) - 1:
time.sleep(0.5) # 休眠0.5秒,防止发送过快乱序
logger.info("[wechatmp] Do send text to {}: {}".format(receiver, reply_text))
elif reply.type == ReplyType.VOICE:
try:
+12 -3
View File
@@ -12,6 +12,8 @@ class WechatMPClient(WeChatClient):
def __init__(self, appid, secret, access_token=None, session=None, timeout=None, auto_retry=True):
super(WechatMPClient, self).__init__(appid, secret, access_token, session, timeout, auto_retry)
self.fetch_access_token_lock = threading.Lock()
self.clear_quota_lock = threading.Lock()
self.last_clear_quota_time = -1
def clear_quota(self):
return self.post("clear_quota", data={"appid": self.appid})
@@ -35,6 +37,13 @@ class WechatMPClient(WeChatClient):
return super()._request(method, url_or_endpoint, **kwargs)
except APILimitedException as e:
logger.error("[wechatmp] API quata has been used up. {}".format(e))
response = self.clear_quota_v2()
logger.debug("[wechatmp] API quata has been cleard, {}".format(response))
return super()._request(method, url_or_endpoint, **kwargs)
if self.last_clear_quota_time == -1 or time.time() - self.last_clear_quota_time > 60:
with self.clear_quota_lock:
if self.last_clear_quota_time == -1 or time.time() - self.last_clear_quota_time > 60:
self.last_clear_quota_time = time.time()
response = self.clear_quota_v2()
logger.debug("[wechatmp] API quata has been cleard, {}".format(response))
return super()._request(method, url_or_endpoint, **kwargs)
else:
logger.error("[wechatmp] last clear quota time is {}, less than 60s, skip clear quota")
raise e
+2
View File
@@ -3,3 +3,5 @@ OPEN_AI = "openAI"
CHATGPT = "chatGPT"
BAIDU = "baidu"
CHATGPTONAZURE = "chatGPTOnAzure"
VERSION = "1.3.0"
+51
View File
@@ -0,0 +1,51 @@
import io
import os
from PIL import Image
def fsize(file):
    """Return the size in bytes of *file*.

    Accepts an in-memory BytesIO buffer, a filesystem path, or any seekable
    file-like object. Raises TypeError for anything else.
    """
    if isinstance(file, io.BytesIO):
        return file.getbuffer().nbytes
    if isinstance(file, str):
        return os.path.getsize(file)
    if hasattr(file, "seek") and hasattr(file, "tell"):
        # Measure by seeking to the end, then restore the original position.
        saved_pos = file.tell()
        file.seek(0, os.SEEK_END)
        total = file.tell()
        file.seek(saved_pos)
        return total
    raise TypeError("Unsupported type")
def compress_imgfile(file, max_size):
    """Re-encode an image as JPEG, lowering quality until it fits *max_size*.

    Returns the original file object untouched if it is already small enough;
    otherwise returns a fresh BytesIO holding the compressed JPEG. If even the
    minimum quality cannot shrink the image under max_size, the best-effort
    result is returned (the original code kept decrementing quality below
    zero, which raises inside PIL).
    """
    if fsize(file) <= max_size:
        return file
    file.seek(0)
    img = Image.open(file)
    rgb_image = img.convert("RGB")  # JPEG cannot store alpha/palette modes
    quality = 95
    while True:
        out_buf = io.BytesIO()
        rgb_image.save(out_buf, "JPEG", quality=quality)
        # fix: stop at the minimum usable quality instead of driving the
        # quality parameter negative on incompressible images
        if fsize(out_buf) <= max_size or quality <= 5:
            return out_buf
        quality -= 5
def split_string_by_utf8_length(string, max_length, max_split=0):
    """Split *string* into chunks of at most *max_length* UTF-8 bytes.

    Chunks never cut a multi-byte character in half. If max_split > 0, at
    most max_split regular chunks are produced and the remainder is appended
    as the final element (so the result holds up to max_split + 1 items).

    Raises ValueError when a single character needs more than max_length
    bytes — the original code looped forever in that case, appending empty
    strings without advancing.
    """
    encoded = string.encode("utf-8")
    start, end = 0, 0
    result = []
    while end < len(encoded):
        if max_split > 0 and len(result) >= max_split:
            result.append(encoded[start:].decode("utf-8"))
            break
        end = min(start + max_length, len(encoded))
        # Back up past UTF-8 continuation bytes (0b10xxxxxx) so the cut lands
        # on a character boundary.
        while end < len(encoded) and (encoded[end] & 0b11000000) == 0b10000000:
            end -= 1
        if end <= start:
            # fix: guard against a character longer than max_length bytes
            raise ValueError("max_length {} is too small to hold a full character".format(max_length))
        result.append(encoded[start:end].decode("utf-8"))
        start = end
    return result
+2 -1
View File
@@ -27,5 +27,6 @@
"voice_reply_voice": false,
"conversation_max_tokens": 1000,
"expires_in_seconds": 3600,
"character_desc": "你是ChatGPT, 一个由OpenAI训练的大型语言模型, 你旨在回答并解决人们的任何问题,并且可以使用多种语言与人交流。"
"character_desc": "你是ChatGPT, 一个由OpenAI训练的大型语言模型, 你旨在回答并解决人们的任何问题,并且可以使用多种语言与人交流。",
"subscribe_msg": "感谢您的关注!\n这里是ChatGPT,可以自由对话。\n支持语音对话。\n支持图片输入。\n支持图片输出,画字开头的消息将按要求创作图片。\n支持tool、角色扮演和文字冒险等丰富的插件。\n输入{trigger_prefix}#help 查看详细指令。"
}
+28 -3
View File
@@ -8,6 +8,7 @@ import pickle
from common.log import logger
# 将所有可用的配置项写在字典里, 请使用小写字母
# 此处的配置值无实际意义,程序不会读取此处的配置,仅用于提示格式,请将配置加入到config.json中
available_setting = {
# openai api配置
"open_ai_api_key": "", # openai api key
@@ -66,6 +67,11 @@ available_setting = {
"chat_time_module": False, # 是否开启服务时间限制
"chat_start_time": "00:00", # 服务开始时间
"chat_stop_time": "24:00", # 服务结束时间
# 翻译api
"translate": "baidu", # 翻译api,支持baidu
# baidu翻译api的配置
"baidu_translate_app_id": "", # 百度翻译api的appid
"baidu_translate_app_key": "", # 百度翻译api的秘钥
# itchat的配置
"hot_reload": False, # 是否开启热重载
# wechaty的配置
@@ -76,10 +82,19 @@ available_setting = {
"wechatmp_app_id": "", # 微信公众平台的appID
"wechatmp_app_secret": "", # 微信公众平台的appsecret
"wechatmp_aes_key": "", # 微信公众平台的EncodingAESKey,加密模式需要
# wechatcom的通用配置
"wechatcom_corp_id": "", # 企业微信公司的corpID
# wechatcomapp的配置
"wechatcomapp_token": "", # 企业微信app的token
"wechatcomapp_port": 9898, # 企业微信app的服务端口,不需要端口转发
"wechatcomapp_secret": "", # 企业微信app的secret
"wechatcomapp_agent_id": "", # 企业微信app的agent_id
"wechatcomapp_aes_key": "", # 企业微信app的aes_key
# chatgpt指令自定义触发词
"clear_memory_commands": ["#清除记忆"], # 重置会话指令,必须以#开头
# channel配置
"channel_type": "wx", # 通道类型,支持:{wx,wxy,terminal,wechatmp,wechatmp_service}
"channel_type": "wx", # 通道类型,支持:{wx,wxy,terminal,wechatmp,wechatmp_service,wechatcom_app}
"subscribe_msg": "", # 订阅消息, 支持: wechatmp, wechatmp_service, wechatcom_app
"debug": False, # 是否开启debug模式,开启后会打印更多日志
"appdata_dir": "", # 数据目录
# 插件配置
@@ -88,8 +103,12 @@ available_setting = {
class Config(dict):
def __init__(self, d: dict = {}):
super().__init__(d)
def __init__(self, d=None):
super().__init__()
if d is None:
d = {}
for k, v in d.items():
self[k] = v
# user_datas: 用户数据,key为用户名,value为用户数据,也是dict
self.user_datas = {}
@@ -197,3 +216,9 @@ def get_appdata_dir():
logger.info("[INIT] data path not exists, create it: {}".format(data_path))
os.makedirs(data_path)
return data_path
def subscribe_msg():
    """Render the configured subscribe greeting, substituting trigger_prefix."""
    prefix = conf().get("single_chat_prefix", [""])[0]
    template = conf().get("subscribe_msg", "")
    return template.format(trigger_prefix=prefix)
+2 -2
View File
@@ -22,8 +22,8 @@ RUN apk add --no-cache \
&& cd ${BUILD_PREFIX} \
&& cp config-template.json ${BUILD_PREFIX}/config.json \
&& /usr/local/bin/python -m pip install --no-cache --upgrade pip \
&& pip install --no-cache -r requirements.txt \
&& pip install --no-cache -r requirements-optional.txt \
&& pip install --no-cache -r requirements.txt --extra-index-url https://alpine-wheels.github.io/index\
&& pip install --no-cache -r requirements-optional.txt --extra-index-url https://alpine-wheels.github.io/index\
&& apk del curl wget
WORKDIR ${BUILD_PREFIX}
@@ -1,4 +1,4 @@
FROM python:3.10-slim
FROM python:3.10-alpine
LABEL maintainer="foo@bar.com"
ARG TZ='Asia/Shanghai'
@@ -9,25 +9,21 @@ ENV BUILD_PREFIX=/app
ADD . ${BUILD_PREFIX}
RUN apt-get update \
&&apt-get install -y --no-install-recommends bash \
ffmpeg espeak \
RUN apk add --no-cache bash ffmpeg espeak \
&& cd ${BUILD_PREFIX} \
&& cp config-template.json config.json \
&& /usr/local/bin/python -m pip install --no-cache --upgrade pip \
&& pip install --no-cache -r requirements.txt \
&& pip install --no-cache -r requirements-optional.txt \
&& pip install azure-cognitiveservices-speech
&& pip install --no-cache -r requirements.txt --extra-index-url https://alpine-wheels.github.io/index\
&& pip install --no-cache -r requirements-optional.txt --extra-index-url https://alpine-wheels.github.io/index
WORKDIR ${BUILD_PREFIX}
ADD docker/entrypoint.sh /entrypoint.sh
RUN chmod +x /entrypoint.sh \
&& groupadd -r noroot \
&& useradd -r -g noroot -s /bin/bash -d /home/noroot noroot \
&& adduser -D -h /home/noroot -u 1000 -s /bin/bash noroot \
&& chown -R noroot:noroot ${BUILD_PREFIX}
USER noroot
ENTRYPOINT ["docker/entrypoint.sh"]
ENTRYPOINT ["/entrypoint.sh"]
+2 -1
View File
@@ -31,9 +31,10 @@ WORKDIR ${BUILD_PREFIX}
ADD ./entrypoint.sh /entrypoint.sh
RUN chmod +x /entrypoint.sh \
&& mkdir -p /home/noroot \
&& groupadd -r noroot \
&& useradd -r -g noroot -s /bin/bash -d /home/noroot noroot \
&& chown -R noroot:noroot ${BUILD_PREFIX}
&& chown -R noroot:noroot /home/noroot ${BUILD_PREFIX} /usr/local/lib
USER noroot
+12 -6
View File
@@ -1,29 +1,35 @@
FROM python:3.10-alpine
FROM python:3.10-slim
LABEL maintainer="foo@bar.com"
ARG TZ='Asia/Shanghai'
ARG CHATGPT_ON_WECHAT_VER
RUN echo /etc/apt/sources.list
RUN sed -i 's/deb.debian.org/mirrors.tuna.tsinghua.edu.cn/g' /etc/apt/sources.list
ENV BUILD_PREFIX=/app
ADD . ${BUILD_PREFIX}
RUN apk add --no-cache bash ffmpeg espeak \
RUN apt-get update \
&&apt-get install -y --no-install-recommends bash ffmpeg espeak libavcodec-extra\
&& cd ${BUILD_PREFIX} \
&& cp config-template.json config.json \
&& /usr/local/bin/python -m pip install --no-cache --upgrade pip \
&& pip install --no-cache -r requirements.txt \
&& pip install --no-cache -r requirements-optional.txt
&& pip install --no-cache -r requirements-optional.txt \
&& pip install azure-cognitiveservices-speech
WORKDIR ${BUILD_PREFIX}
ADD docker/entrypoint.sh /entrypoint.sh
RUN chmod +x /entrypoint.sh \
&& adduser -D -h /home/noroot -u 1000 -s /bin/bash noroot \
&& chown -R noroot:noroot ${BUILD_PREFIX}
&& mkdir -p /home/noroot \
&& groupadd -r noroot \
&& useradd -r -g noroot -s /bin/bash -d /home/noroot noroot \
&& chown -R noroot:noroot /home/noroot ${BUILD_PREFIX} /usr/local/lib
USER noroot
ENTRYPOINT ["docker/entrypoint.sh"]
ENTRYPOINT ["/entrypoint.sh"]
+3 -3
View File
@@ -38,9 +38,9 @@ if [ "$CHATGPT_ON_WECHAT_EXEC" == "" ] ; then
fi
# modify content in config.json
if [ "$OPEN_AI_API_KEY" == "YOUR API KEY" ] || [ "$OPEN_AI_API_KEY" == "" ]; then
echo -e "\033[31m[Warning] You need to set OPEN_AI_API_KEY before running!\033[0m"
fi
# if [ "$OPEN_AI_API_KEY" == "YOUR API KEY" ] || [ "$OPEN_AI_API_KEY" == "" ]; then
# echo -e "\033[31m[Warning] You need to set OPEN_AI_API_KEY before running!\033[0m"
# fi
# go to prefix dir
Binary file not shown.

After

Width:  |  Height:  |  Size: 51 KiB

+14 -7
View File
@@ -43,6 +43,7 @@ def login(self, enableCmdQR=False, picDir=None, qrCallback=None,
logger.warning('itchat has already logged in.')
return
self.isLogging = True
logger.info('Ready to login.')
while self.isLogging:
uuid = push_login(self)
if uuid:
@@ -84,7 +85,7 @@ def login(self, enableCmdQR=False, picDir=None, qrCallback=None,
if hasattr(loginCallback, '__call__'):
r = loginCallback()
else:
utils.clear_screen()
# utils.clear_screen()
if os.path.exists(picDir or config.DEFAULT_QR):
os.remove(picDir or config.DEFAULT_QR)
logger.info('Login successfully as %s' % self.storageClass.nickName)
@@ -195,13 +196,17 @@ def process_login_info(core, loginContent):
core.loginInfo['logintime'] = int(time.time() * 1e3)
core.loginInfo['BaseRequest'] = {}
cookies = core.s.cookies.get_dict()
skey = re.findall('<skey>(.*?)</skey>', r.text, re.S)[0]
pass_ticket = re.findall(
'<pass_ticket>(.*?)</pass_ticket>', r.text, re.S)[0]
core.loginInfo['skey'] = core.loginInfo['BaseRequest']['Skey'] = skey
res = re.findall('<skey>(.*?)</skey>', r.text, re.S)
skey = res[0] if res else None
res = re.findall(
'<pass_ticket>(.*?)</pass_ticket>', r.text, re.S)
pass_ticket = res[0] if res else None
if skey is not None:
core.loginInfo['skey'] = core.loginInfo['BaseRequest']['Skey'] = skey
core.loginInfo['wxsid'] = core.loginInfo['BaseRequest']['Sid'] = cookies["wxsid"]
core.loginInfo['wxuin'] = core.loginInfo['BaseRequest']['Uin'] = cookies["wxuin"]
core.loginInfo['pass_ticket'] = pass_ticket
if pass_ticket is not None:
core.loginInfo['pass_ticket'] = pass_ticket
# A question : why pass_ticket == DeviceID ?
# deviceID is only a randomly generated number
@@ -317,6 +322,8 @@ def start_receiving(self, exitCallback=None, getReceivingFnOnly=False):
retryCount += 1
logger.error(traceback.format_exc())
if self.receivingRetryCount < retryCount:
logger.error("Having tried %s times, but still failed. " % (
retryCount) + "Stop trying...")
self.alive = False
else:
time.sleep(1)
@@ -363,7 +370,7 @@ def sync_check(self):
regx = r'window.synccheck={retcode:"(\d+)",selector:"(\d+)"}'
pm = re.search(regx, r.text)
if pm is None or pm.group(1) != '0':
logger.debug('Unexpected sync check result: %s' % r.text)
logger.error('Unexpected sync check result: %s' % r.text)
return None
return pm.group(2)
+5 -2
View File
@@ -25,9 +25,12 @@ def auto_login(self, hotReload=False, statusStorageDir='itchat.pkl',
self.useHotReload = hotReload
self.hotReloadDir = statusStorageDir
if hotReload:
if self.load_login_status(statusStorageDir,
loginCallback=loginCallback, exitCallback=exitCallback):
rval=self.load_login_status(statusStorageDir,
loginCallback=loginCallback, exitCallback=exitCallback)
if rval:
return
logger.error('Hot reload failed, logging in normally, error={}'.format(rval))
self.logout()
self.login(enableCmdQR=enableCmdQR, picDir=picDir, qrCallback=qrCallback,
loginCallback=loginCallback, exitCallback=exitCallback)
self.dump_login_status(statusStorageDir)
+1 -1
View File
@@ -2,6 +2,6 @@ providers = ['python']
[phases.setup]
nixPkgs = ['python310']
cmds = ['apt-get update','apt-get install -y --no-install-recommends ffmpeg espeak','python -m venv /opt/venv && . /opt/venv/bin/activate && pip install -r requirements-optional.txt']
cmds = ['apt-get update','apt-get install -y --no-install-recommends ffmpeg espeak libavcodec-extra','python -m venv /opt/venv && . /opt/venv/bin/activate && pip install -r requirements-optional.txt']
[start]
cmd = "python ./app.py"
+1 -1
View File
@@ -64,7 +64,7 @@ class Dungeon(Plugin):
if e_context["context"].type != ContextType.TEXT:
return
bottype = Bridge().get_bot_type("chat")
if bottype not in (const.CHATGPT, const.OPEN_AI):
if bottype not in [const.OPEN_AI, const.CHATGPT, const.CHATGPTONAZURE]:
return
bot = Bridge().get_bot("chat")
content = e_context["context"].content[:]
+2 -2
View File
@@ -265,7 +265,7 @@ class Godcmd(Plugin):
except Exception as e:
ok, result = False, "你没有设置私有api_key"
elif cmd == "reset":
if bottype in (const.CHATGPT, const.OPEN_AI):
if bottype in [const.OPEN_AI, const.CHATGPT, const.CHATGPTONAZURE]:
bot.sessions.clear_session(session_id)
channel.cancel_session(session_id)
ok, result = True, "会话已重置"
@@ -288,7 +288,7 @@ class Godcmd(Plugin):
load_config()
ok, result = True, "配置已重载"
elif cmd == "resetall":
if bottype in (const.CHATGPT, const.OPEN_AI):
if bottype in [const.OPEN_AI, const.CHATGPT, const.CHATGPTONAZURE]:
channel.cancel_all_session()
bot.sessions.clear_all_session()
ok, result = True, "重置所有会话成功"
+1 -1
View File
@@ -123,7 +123,7 @@ class PluginManager:
try:
instance = plugincls()
except Exception as e:
logger.error("Failed to init %s, diabled. %s" % (name, e))
logger.exception("Failed to init %s, diabled. %s" % (name, e))
self.disable_plugin(name)
failed_plugins.append(name)
continue
+2 -2
View File
@@ -98,8 +98,8 @@ class Role(Plugin):
def on_handle_context(self, e_context: EventContext):
if e_context["context"].type != ContextType.TEXT:
return
bottype = Bridge().get_bot_type("chat")
if bottype not in (const.CHATGPT, const.OPEN_AI):
btype = Bridge().get_bot_type("chat")
if btype not in [const.OPEN_AI, const.CHATGPT, const.CHATGPTONAZURE]:
return
bot = Bridge().get_bot("chat")
content = e_context["context"].content[:]
+27 -17
View File
@@ -1,6 +1,11 @@
## 插件描述
一个能让chatgpt联网,搜索,数字运算的插件,将赋予强大且丰富的扩展能力
使用该插件需在机器人回复你的前提下,在对话内容前加$tool;仅输入$tool将返回tool插件帮助信息,用于测试插件是否加载成功
一个能让chatgpt联网,搜索,数字运算的插件,将赋予强大且丰富的扩展能力
使用说明(默认trigger_prefix为$)
```text
#help tool: 查看tool帮助信息,可查看已加载工具列表
$tool 命令: 根据给出的{命令}使用一些可用工具尽力为你得到结果。
$tool reset: 重置工具。
```
### 本插件所有工具同步存放至专用仓库:[chatgpt-tool-hub](https://github.com/goldfishh/chatgpt-tool-hub)
@@ -19,9 +24,9 @@
> 注1url-get默认配置、browser需额外配置,browser依赖google-chrome,你需要提前安装好
> 注2browser默认使用summary tool 分段总结长文本信息tokens可能会大量消耗!
> 注2当检测到长文本时会进入summary tool总结长文本,tokens可能会大量消耗!
这是debian端安装google-chrome教程,其他系统请行查找
这是debian端安装google-chrome教程,其他系统请自行查找
> https://www.linuxjournal.com/content/how-can-you-install-google-browser-debian
### 3. terminal
@@ -50,7 +55,7 @@
### 5. wikipedia
###### 可以回答你想要知道确切的人事物
### 6. 新闻类工具
### 6. news 新闻类工具集合
#### 6.1. news-api *
###### 从全球 80,000 多个信息源中获取当前和历史新闻文章
@@ -58,6 +63,11 @@
#### 6.2. morning-news *
###### 每日60秒早报,每天凌晨一点更新,本工具使用了[alapi-每日60秒早报](https://alapi.cn/api/view/93)
```text
可配置参数:
1. morning_news_use_llm: 是否使用LLM润色结果,默认false(可能会慢)
```
> 该tool每天返回内容相同
#### 6.3. finance-news
@@ -65,6 +75,8 @@
> 该工具需要解决browser tool 的google-chrome依赖安装
> news更新:0.4版本对news工具做了整合,只要加入news一个工具就会自动加载所有新闻类工具
### 7. bing-search *
###### bing搜索引擎,从此你不用再烦恼搜索要用哪些关键词
@@ -74,26 +86,25 @@
### 9. google-search *
###### google搜索引擎,申请流程较bing-search繁琐
### 10. arxiv(dev 开发中)
### 10. arxiv
###### 用于查找论文
```text
可配置参数:
1. arxiv_summary: 是否使用总结工具,默认true, 当为false时会直接返回论文的标题、作者、发布时间、摘要、分类、备注、pdf链接等内容
```
### 11. debug(dev 开发中,目前没有接入wechat)
###### 当bot遇到无法确定的信息时,将会向你寻求帮助的工具
> 0.4.2更新,例子:帮我找一篇吴恩达写的论文
### 12. summary
### 11. summary
###### 总结工具,该工具必须输入一个本地文件的绝对路径
> 该工具目前是和其他工具配合使用,暂未测试单独使用效果
### 13. image2text
### 12. image2text
###### 将图片转换成文字,底层调用imageCaption模型,该工具必须输入一个本地文件的绝对路径
### 14. searxng-search *
### 13. searxng-search *
###### 一个私有化的搜索引擎工具
> 安装教程:https://docs.searxng.org/admin/installation.html
@@ -118,7 +129,7 @@
```
注:config.json文件非必须,未创建仍可使用本tool;带*工具需在kwargs填入对应api-key键值对
- `tools`:本插件初始化时加载的工具, 目前可选集:["wikipedia", "wolfram-alpha", "bing-search", "google-search", "news"] & 默认工具,除wikipedia工具之外均需要申请api-key
- `tools`:本插件初始化时加载的工具, 上述标题即是对应工具名称,带*工具必须在kwargs中配置相应api-key
- `kwargs`:工具执行时的配置,一般在这里存放**api-key**,或环境配置
- `debug`: 输出chatgpt-tool-hub额外信息用于调试
- `request_timeout`: 访问openai接口的超时时间,默认与wechat-on-chatgpt配置一致,可单独配置
@@ -133,4 +144,3 @@
- 虽然我会有意加入一些限制,但请不要使用本插件做危害他人的事情,请提前了解清楚某些内容是否会违反相关规定,建议提前做好过滤
- 如有本插件问题,请将debug设置为true无上下文重新问一遍,如仍有问题请访问[chatgpt-tool-hub](https://github.com/goldfishh/chatgpt-tool-hub)建个issue,将日志贴进去,我无法处理不能复现的问题
- 欢迎 star & 宣传,有能力请提pr
+14 -5
View File
@@ -33,12 +33,17 @@ class Tool(Plugin):
def get_help_text(self, verbose=False, **kwargs):
help_text = "这是一个能让chatgpt联网,搜索,数字运算的插件,将赋予强大且丰富的扩展能力。"
trigger_prefix = conf().get("plugin_trigger_prefix", "$")
if not verbose:
return help_text
trigger_prefix = conf().get("plugin_trigger_prefix", "$")
help_text += "使用说明:\n"
help_text += "\n使用说明:\n"
help_text += f"{trigger_prefix}tool " + "命令: 根据给出的{命令}使用一些可用工具尽力为你得到结果。\n"
help_text += f"{trigger_prefix}tool reset: 重置工具。\n"
help_text += f"{trigger_prefix}tool reset: 重置工具。\n\n"
help_text += f"已加载工具列表: \n"
for idx, tool in enumerate(self.app.get_tool_list()):
if idx != 0:
help_text += ", "
help_text += f"{tool}"
return help_text
def on_handle_context(self, e_context: EventContext):
@@ -130,6 +135,7 @@ class Tool(Plugin):
return {
"debug": kwargs.get("debug", False),
"openai_api_key": conf().get("open_ai_api_key", ""),
"open_ai_api_base": conf().get("open_ai_api_base", "https://api.openai.com/v1"),
"proxy": conf().get("proxy", ""),
"request_timeout": request_timeout if request_timeout else conf().get("request_timeout", 120),
# note: 目前tool暂未对其他模型测试,但这里仍对配置来源做了优先级区分,一般插件配置可覆盖全局配置
@@ -144,13 +150,16 @@ class Tool(Plugin):
"google_api_key": kwargs.get("google_api_key", ""),
"google_cse_id": kwargs.get("google_cse_id", ""),
# for searxng-search tool
"searx_host": kwargs.get("searx_host", ""),
"searx_search_host": kwargs.get("searx_search_host", ""),
# for wolfram-alpha tool
"wolfram_alpha_appid": kwargs.get("wolfram_alpha_appid", ""),
# for morning-news tool
"zaobao_api_key": kwargs.get("zaobao_api_key", ""),
"morning_news_api_key": kwargs.get("morning_news_api_key", ""),
# for visual_dl tool
"cuda_device": kwargs.get("cuda_device", "cpu"),
"think_depth": kwargs.get("think_depth", 3),
"arxiv_summary": kwargs.get("arxiv_summary", True),
"morning_news_use_llm": kwargs.get("morning_news_use_llm", False),
}
def _filter_tool_list(self, tool_list: list):
+5 -3
View File
@@ -6,7 +6,9 @@ SpeechRecognition # google speech to text
gTTS>=2.3.1 # google text to speech
pyttsx3>=2.90 # pytsx text to speech
baidu_aip>=4.16.10 # baidu voice
# azure-cognitiveservices-speech # azure voice
azure-cognitiveservices-speech # azure voice
numpy<=1.24.2
langid # language detect
#install plugin
dulwich
@@ -16,11 +18,11 @@ wechaty>=0.10.7
wechaty_puppet>=0.4.23
pysilk_mod>=1.6.0 # needed by send voice
# wechatmp
# wechatmp wechatcom
web.py
wechatpy
# chatgpt-tool-hub plugin
--extra-index-url https://pypi.python.org/simple
chatgpt_tool_hub>=0.4.1
chatgpt_tool_hub==0.4.3
+1
View File
@@ -4,4 +4,5 @@ PyQRCode>=1.2.1
qrcode>=7.4.2
requests>=2.28.2
chardet>=5.1.0
Pillow
pre-commit
+47
View File
@@ -0,0 +1,47 @@
# -*- coding: utf-8 -*-
import random
from hashlib import md5
import requests
from config import conf
from translate.translator import Translator
class BaiduTranslator(Translator):
    """Translator backed by the Baidu Fanyi HTTP API."""

    def __init__(self) -> None:
        super().__init__()
        endpoint = "http://api.fanyi.baidu.com"
        path = "/api/trans/vip/translate"
        self.url = endpoint + path
        self.appid = conf().get("baidu_translate_app_id")
        self.appkey = conf().get("baidu_translate_app_key")

    # For the list of language codes refer to https://api.fanyi.baidu.com/doc/21
    # (codes need converting to ISO 639-1).
    def translate(self, query: str, from_lang: str = "", to_lang: str = "en") -> str:
        """Translate *query*; retries up to 3 times on transient API errors.

        Raises Exception with the API error message on a non-retryable error,
        or RuntimeError once all retries are exhausted.
        """
        if not from_lang:
            from_lang = "auto"  # Baidu supports auto language detection
        salt = random.randint(32768, 65536)
        sign = self.make_md5(self.appid + query + str(salt) + self.appkey)
        headers = {"Content-Type": "application/x-www-form-urlencoded"}
        payload = {"appid": self.appid, "q": query, "from": from_lang, "to": to_lang, "salt": salt, "sign": sign}

        retry_cnt = 3
        while retry_cnt:
            r = requests.post(self.url, params=payload, headers=headers)
            result = r.json()
            errcode = result.get("error_code", "52000")
            if errcode != "52000":
                if errcode == "52001" or errcode == "52002":
                    # 52001/52002 are transient (request timeout / system error)
                    retry_cnt -= 1
                    continue
                else:
                    raise Exception(result["error_msg"])
            else:
                break
        else:
            # fix: previously fell through with the error payload still in
            # `result` and raised KeyError on result["trans_result"]
            raise RuntimeError("Baidu translate failed after retries: {}".format(result.get("error_msg")))

        text = "\n".join([item["dst"] for item in result["trans_result"]])
        return text

    def make_md5(self, s, encoding="utf-8"):
        """Return the hex MD5 digest of *s* (used for API request signing)."""
        return md5(s.encode(encoding)).hexdigest()
+6
View File
@@ -0,0 +1,6 @@
def create_translator(voice_type):
    """Factory returning a Translator implementation for *voice_type*.

    Raises RuntimeError for an unknown/unsupported type.
    """
    if voice_type == "baidu":
        from translate.baidu.baidu_translate import BaiduTranslator

        return BaiduTranslator()
    # fix: include the offending type in the error instead of a bare raise
    raise RuntimeError("unknown translator type: {}".format(voice_type))
+12
View File
@@ -0,0 +1,12 @@
"""
Voice service abstract class
"""
class Translator(object):
    """Abstract base class for translation services."""

    # Language codes follow ISO 639-1:
    # https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes
    def translate(self, query: str, from_lang: str = "", to_lang: str = "en") -> str:
        """Translate text from one language to another."""
        raise NotImplementedError
+38
View File
@@ -80,6 +80,21 @@ def any_to_sil(any_path, sil_path):
return audio.duration_seconds * 1000
def any_to_amr(any_path, amr_path):
    """Convert any supported audio file to AMR format.

    Returns the audio duration in milliseconds, or None when the source is
    already an .amr file (it is simply copied). Silk inputs are rejected.
    """
    if any_path.endswith(".amr"):
        shutil.copy2(any_path, amr_path)
        return
    if any_path.endswith((".sil", ".silk", ".slk")):
        raise NotImplementedError("Not support file type: {}".format(any_path))
    audio = AudioSegment.from_file(any_path)
    audio = audio.set_frame_rate(8000)  # only support 8000
    audio.export(amr_path, format="amr")
    return audio.duration_seconds * 1000
def sil_to_wav(silk_path, wav_path, rate: int = 24000):
"""
silk 文件转 wav
@@ -87,3 +102,26 @@ def sil_to_wav(silk_path, wav_path, rate: int = 24000):
wav_data = pysilk.decode_file(silk_path, to_wav=True, sample_rate=rate)
with open(wav_path, "wb") as f:
f.write(wav_data)
def split_audio(file_path, max_segment_length_ms=60000):
    """
    Split an audio file into segments no longer than *max_segment_length_ms*.

    :param file_path: path of the audio file to split; its extension is used
        as the export format for the segment files.
    :param max_segment_length_ms: maximum length of each segment in milliseconds.
    :return: tuple (total_length_ms, file_paths). When the audio already fits
        in one segment, the original path is returned unchanged.
    :raises ValueError: if the format cannot be inferred from *file_path*.
    """
    audio = AudioSegment.from_file(file_path)
    audio_length_ms = len(audio)
    if audio_length_ms <= max_segment_length_ms:
        return audio_length_ms, [file_path]
    # rpartition gives a clear error for extension-less paths instead of the
    # cryptic ValueError("substring not found") that rindex(".") raised.
    file_prefix, dot, fmt = file_path.rpartition(".")
    if not dot:
        raise ValueError("cannot infer audio format from path: {}".format(file_path))
    files = []
    # Slice and export in one pass; no need to materialize all segments first.
    for index, start_ms in enumerate(range(0, audio_length_ms, max_segment_length_ms)):
        end_ms = min(audio_length_ms, start_ms + max_segment_length_ms)
        segment_path = f"{file_prefix}_{index + 1}.{fmt}"
        audio[start_ms:end_ms].export(segment_path, format=fmt)
        files.append(segment_path)
    return audio_length_ms, files
+29 -6
View File
@@ -6,6 +6,7 @@ import os
import time
import azure.cognitiveservices.speech as speechsdk
from langid import classify
from bridge.reply import Reply, ReplyType
from common.log import logger
@@ -30,7 +31,15 @@ class AzureVoice(Voice):
config = None
if not os.path.exists(config_path): # 如果没有配置文件,创建本地配置文件
config = {
"speech_synthesis_voice_name": "zh-CN-XiaoxiaoNeural",
"speech_synthesis_voice_name": "zh-CN-XiaoxiaoNeural", # 识别不出时的默认语音
"auto_detect": True, # 是否自动检测语言
"speech_synthesis_zh": "zh-CN-XiaozhenNeural",
"speech_synthesis_en": "en-US-JacobNeural",
"speech_synthesis_ja": "ja-JP-AoiNeural",
"speech_synthesis_ko": "ko-KR-SoonBokNeural",
"speech_synthesis_de": "de-DE-LouisaNeural",
"speech_synthesis_fr": "fr-FR-BrigitteNeural",
"speech_synthesis_es": "es-ES-LaiaNeural",
"speech_recognition_language": "zh-CN",
}
with open(config_path, "w") as fw:
@@ -38,11 +47,12 @@ class AzureVoice(Voice):
else:
with open(config_path, "r") as fr:
config = json.load(fr)
self.config = config
self.api_key = conf().get("azure_voice_api_key")
self.api_region = conf().get("azure_voice_region")
self.speech_config = speechsdk.SpeechConfig(subscription=self.api_key, region=self.api_region)
self.speech_config.speech_synthesis_voice_name = config["speech_synthesis_voice_name"]
self.speech_config.speech_recognition_language = config["speech_recognition_language"]
self.speech_config.speech_synthesis_voice_name = self.config["speech_synthesis_voice_name"]
self.speech_config.speech_recognition_language = self.config["speech_recognition_language"]
except Exception as e:
logger.warn("AzureVoice init failed: %s, ignore " % e)
@@ -54,12 +64,24 @@ class AzureVoice(Voice):
logger.info("[Azure] voiceToText voice file name={} text={}".format(voice_file, result.text))
reply = Reply(ReplyType.TEXT, result.text)
else:
logger.error("[Azure] voiceToText error, result={}, canceldetails={}".format(result, result.cancellation_details))
cancel_details = result.cancellation_details
logger.error("[Azure] voiceToText error, result={}, errordetails={}".format(result, cancel_details.error_details))
reply = Reply(ReplyType.ERROR, "抱歉,语音识别失败")
return reply
def textToVoice(self, text):
fileName = TmpDir().path() + "reply-" + str(int(time.time())) + ".wav"
if self.config.get("auto_detect"):
lang = classify(text)[0]
key = "speech_synthesis_" + lang
if key in self.config:
logger.info("[Azure] textToVoice auto detect language={}, voice={}".format(lang, self.config[key]))
self.speech_config.speech_synthesis_voice_name = self.config[key]
else:
self.speech_config.speech_synthesis_voice_name = self.config["speech_synthesis_voice_name"]
else:
self.speech_config.speech_synthesis_voice_name = self.config["speech_synthesis_voice_name"]
# Avoid the same filename under multithreading
fileName = TmpDir().path() + "reply-" + str(int(time.time())) + "-" + str(hash(text) & 0x7FFFFFFF) + ".wav"
audio_config = speechsdk.AudioConfig(filename=fileName)
speech_synthesizer = speechsdk.SpeechSynthesizer(speech_config=self.speech_config, audio_config=audio_config)
result = speech_synthesizer.speak_text(text)
@@ -67,6 +89,7 @@ class AzureVoice(Voice):
logger.info("[Azure] textToVoice text={} voice file name={}".format(text, fileName))
reply = Reply(ReplyType.VOICE, fileName)
else:
logger.error("[Azure] textToVoice error, result={}, canceldetails={}".format(result, result.cancellation_details))
cancel_details = result.cancellation_details
logger.error("[Azure] textToVoice error, result={}, errordetails={}".format(result, cancel_details.error_details))
reply = Reply(ReplyType.ERROR, "抱歉,语音合成失败")
return reply
+8
View File
@@ -1,4 +1,12 @@
{
"speech_synthesis_voice_name": "zh-CN-XiaoxiaoNeural",
"auto_detect": true,
"speech_synthesis_zh": "zh-CN-YunxiNeural",
"speech_synthesis_en": "en-US-JacobNeural",
"speech_synthesis_ja": "ja-JP-AoiNeural",
"speech_synthesis_ko": "ko-KR-SoonBokNeural",
"speech_synthesis_de": "de-DE-LouisaNeural",
"speech_synthesis_fr": "fr-FR-BrigitteNeural",
"speech_synthesis_es": "es-ES-LaiaNeural",
"speech_recognition_language": "zh-CN"
}
+2 -1
View File
@@ -82,7 +82,8 @@ class BaiduVoice(Voice):
{"spd": self.spd, "pit": self.pit, "vol": self.vol, "per": self.per},
)
if not isinstance(result, dict):
fileName = TmpDir().path() + "reply-" + str(int(time.time())) + ".mp3"
# Avoid the same filename under multithreading
fileName = TmpDir().path() + "reply-" + str(int(time.time())) + "-" + str(hash(text) & 0x7FFFFFFF) + ".mp3"
with open(fileName, "wb") as f:
f.write(result)
logger.info("[Baidu] textToVoice text={} voice file name={}".format(text, fileName))
+2 -1
View File
@@ -35,7 +35,8 @@ class GoogleVoice(Voice):
def textToVoice(self, text):
try:
mp3File = TmpDir().path() + "reply-" + str(int(time.time())) + ".mp3"
# Avoid the same filename under multithreading
mp3File = TmpDir().path() + "reply-" + str(int(time.time())) + "-" + str(hash(text) & 0x7FFFFFFF) + ".mp3"
tts = gTTS(text=text, lang="zh")
tts.save(mp3File)
logger.info("[Google] textToVoice text={} voice file name={}".format(text, mp3File))
+1 -1
View File
@@ -34,7 +34,7 @@ class PyttsVoice(Voice):
def textToVoice(self, text):
try:
# avoid the same filename
# Avoid the same filename under multithreading
wavFileName = "reply-" + str(int(time.time())) + "-" + str(hash(text) & 0x7FFFFFFF) + ".wav"
wavFile = TmpDir().path() + wavFileName
logger.info("[Pytts] textToVoice text={} voice file name={}".format(text, wavFile))