Compare commits

..

9 Commits

Author SHA1 Message Date
lanvent 7458a6298f feat: add trigger_by_self option 2023-04-03 23:58:19 +08:00
lanvent b0f54bb8b7 fix: dirty message including at and prefix 2023-04-03 23:53:58 +08:00
lanvent acddadc406 feat: add convert pcm32 to pcm16 2023-04-03 22:55:39 +08:00
lanvent b74274b96b fix: old code in hello plugin 2023-04-03 02:00:33 +08:00
lanvent 49ba278316 fix: use english filename 2023-04-02 16:50:11 +08:00
lanvent 388058467c fix: delete same file twice 2023-04-02 14:55:45 +08:00
lanvent cf25bd7869 feat: itchat show qrcode using viewer 2023-04-02 14:45:38 +08:00
lanvent 02a95345aa fix: add more qrcode api 2023-04-02 14:13:38 +08:00
lanvent 6076e2ed0a fix: voice longer than 60s cannot be sent 2023-04-02 12:29:10 +08:00
11 changed files with 62 additions and 30 deletions
+15 -10
View File
@@ -38,7 +38,7 @@ class ChatChannel(Channel):
if first_in: # context首次传入时,receiver是None,根据类型设置receiver
config = conf()
cmsg = context['msg']
if cmsg.from_user_id == self.user_id:
if cmsg.from_user_id == self.user_id and not config.get('trigger_by_self', False):
logger.debug("[WX]self message skipped")
return None
if context["isgroup"]:
@@ -70,17 +70,21 @@ class ChatChannel(Channel):
# 校验关键字
match_prefix = check_prefix(content, conf().get('group_chat_prefix'))
match_contain = check_contain(content, conf().get('group_chat_keyword'))
flag = False
if match_prefix is not None or match_contain is not None:
flag = True
if match_prefix:
content = content.replace(match_prefix, '', 1).strip()
elif context['msg'].is_at and not conf().get("group_at_off", False):
logger.info("[WX]receive group at, continue")
if context['msg'].is_at:
logger.info("[WX]receive group at")
if not conf().get("group_at_off", False):
flag = True
pattern = f'@{self.name}(\u2005|\u0020)'
content = re.sub(pattern, r'', content)
elif context["origin_ctype"] == ContextType.VOICE:
logger.info("[WX]receive group voice, checkprefix didn't match")
return None
else:
if not flag:
if context["origin_ctype"] == ContextType.VOICE:
logger.info("[WX]receive group voice, but checkprefix didn't match")
return None
else: # 单聊
match_prefix = check_prefix(content, conf().get('single_chat_prefix'))
@@ -106,7 +110,6 @@ class ChatChannel(Channel):
return context
# 处理消息 TODO: 如果wechaty解耦,此处逻辑可以放置到父类
def _handle(self, context: Context):
if context is None or not context.content:
return
@@ -144,9 +147,11 @@ class ChatChannel(Channel):
# 删除临时文件
try:
os.remove(file_path)
os.remove(wav_path)
if wav_path != file_path:
os.remove(wav_path)
except Exception as e:
logger.warning("[WX]delete temp file error: " + str(e))
pass
# logger.warning("[WX]delete temp file error: " + str(e))
if reply.type == ReplyType.TEXT:
new_context = self._compose_context(
+17 -3
View File
@@ -70,11 +70,25 @@ def _check(func):
def qrCallback(uuid,status,qrcode):
# logger.debug("qrCallback: {} {}".format(uuid,status))
if status == '0':
try:
from PIL import Image
img = Image.open(io.BytesIO(qrcode))
thread_pool.submit(img.show,"QRCode")
except Exception as e:
pass
import qrcode
url = f"https://login.weixin.qq.com/l/{uuid}"
qr_api="https://api.isoyu.com/qr/?m=1&e=L&p=20&url={}".format(url)
print("You can also scan QRCode in the website below:\n{}".format(qr_api))
qr_api1="https://api.isoyu.com/qr/?m=1&e=L&p=20&url={}".format(url)
qr_api2="https://api.qrserver.com/v1/create-qr-code/?size=400x400&data={}".format(url)
qr_api3="https://api.pwmqr.com/qrcode/create/?url={}".format(url)
qr_api4="https://my.tv.sohu.com/user/a/wvideo/getQRCode.do?text={}".format(url)
print("You can also scan QRCode in any website below:")
print(qr_api3)
print(qr_api4)
print(qr_api2)
print(qr_api1)
qr = qrcode.QRCode(border=1)
qr.add_data(url)
+5 -2
View File
@@ -73,6 +73,9 @@ class WechatyChannel(ChatChannel):
file_path = reply.content
sil_file = os.path.splitext(file_path)[0] + '.sil'
voiceLength = int(any_to_sil(file_path, sil_file))
if voiceLength >= 60000:
voiceLength = 60000
logger.info('[WX] voice too long, length={}, set to 60s'.format(voiceLength))
# 发送语音
t = int(time.time())
msg = FileBox.from_file(sil_file, name=str(t) + '.sil')
@@ -81,7 +84,8 @@ class WechatyChannel(ChatChannel):
asyncio.run_coroutine_threadsafe(receiver.say(msg),loop).result()
try:
os.remove(file_path)
os.remove(sil_file)
if sil_file != file_path:
os.remove(sil_file)
except Exception as e:
pass
logger.info('[WX] sendVoice={}, receiver={}'.format(reply.content, receiver))
@@ -113,7 +117,6 @@ class WechatyChannel(ChatChannel):
return
logger.debug('[WX] message:{}'.format(cmsg))
room = msg.room() # 获取消息来自的群聊. 如果消息不是来自群聊, 则返回None
isgroup = room is not None
ctype = cmsg.ctype
context = self._compose_context(ctype, cmsg.content, isgroup=isgroup, msg=cmsg)
+1
View File
@@ -25,6 +25,7 @@ available_setting = {
"group_name_white_list": ["ChatGPT测试群", "ChatGPT测试群2"], # 开启自动回复的群名称列表
"group_name_keyword_white_list": [], # 开启自动回复的群名称关键词列表
"group_chat_in_one_session": ["ChatGPT测试群"], # 支持会话上下文共享的群名称
"trigger_by_self": False, # 是否允许机器人触发
"image_create_prefix": ["", "", ""], # 开启图片回复的前缀
# chatgpt会话参数
+8 -5
View File
@@ -101,7 +101,7 @@ PS: 插件目前支持`itchat`和`wechaty`
根据`Context`和回复`Reply`的类型,对回复的内容进行装饰。目前的装饰有以下两种:
- `TEXT`文本回复根据是否在群聊中来决定是艾特接收方还是添加回复的前缀。
- `TEXT`文本回复:如果这次消息需要的回复是`VOICE`,进行文字转语音回复之后再次装饰。 否则根据是否在群聊中来决定是艾特接收方还是添加回复的前缀。
- `INFO`和`ERROR`类型,会在消息前添加对应的系统提示字样。
@@ -110,8 +110,11 @@ PS: 插件目前支持`itchat`和`wechaty`
```python
if reply.type == ReplyType.TEXT:
reply_text = reply.content
if context.get('desire_rtype') == ReplyType.VOICE:
reply = super().build_text_to_voice(reply.content)
return self._decorate_reply(context, reply)
if context['isgroup']:
reply_text = '@' + context['msg']['ActualNickName'] + ' ' + reply_text.strip()
reply_text = '@' + context['msg'].actual_user_nickname + ' ' + reply_text.strip()
reply_text = conf().get("group_chat_reply_prefix", "")+reply_text
else:
reply_text = conf().get("single_chat_reply_prefix", "")+reply_text
@@ -213,11 +216,11 @@ class Hello(Plugin):
if content == "Hello":
reply = Reply()
reply.type = ReplyType.TEXT
msg = e_context['context']['msg']
msg:ChatMessage = e_context['context']['msg']
if e_context['context']['isgroup']:
reply.content = "Hello, " + msg['ActualNickName'] + " from " + msg['User'].get('NickName', "Group")
reply.content = f"Hello, {msg.actual_user_nickname} from {msg.from_user_nickname}"
else:
reply.content = "Hello, " + msg['User'].get('NickName', "My friend")
reply.content = f"Hello, {msg.from_user_nickname}"
e_context['reply'] = reply
e_context.action = EventAction.BREAK_PASS # 事件结束,并跳过处理context的默认逻辑
if content == "End":
+4 -3
View File
@@ -2,6 +2,7 @@
from bridge.context import ContextType
from bridge.reply import Reply, ReplyType
from channel.chat_message import ChatMessage
import plugins
from plugins import *
from common.log import logger
@@ -24,11 +25,11 @@ class Hello(Plugin):
if content == "Hello":
reply = Reply()
reply.type = ReplyType.TEXT
msg = e_context['context']['msg']
msg:ChatMessage = e_context['context']['msg']
if e_context['context']['isgroup']:
reply.content = "Hello, " + msg['ActualNickName'] + " from " + msg['User'].get('NickName', "Group")
reply.content = f"Hello, {msg.actual_user_nickname} from {msg.from_user_nickname}"
else:
reply.content = "Hello, " + msg['User'].get('NickName', "My friend")
reply.content = f"Hello, {msg.from_user_nickname}"
e_context['reply'] = reply
e_context.action = EventAction.BREAK_PASS # 事件结束,并跳过处理context的默认逻辑
+8 -3
View File
@@ -67,23 +67,28 @@ def pcm_to_sil(pcm_path, silk_path):
return 声音长度,毫秒
"""
audio = AudioSegment.from_wav(pcm_path)
wav_data = audio.raw_data
rate = find_closest_sil_supports(audio.frame_rate)
# Convert to PCM_s16
pcm_s16 = audio.set_sample_width(2)
pcm_s16 = pcm_s16.set_frame_rate(rate)
wav_data = pcm_s16.raw_data
silk_data = pysilk.encode(
wav_data, data_rate=rate, sample_rate=rate)
with open(silk_path, "wb") as f:
f.write(silk_data)
return audio.duration_seconds * 1000
def mp3_to_sil(mp3_path, silk_path):
"""
mp3 文件转成 silk
return 声音长度,毫秒
"""
audio = AudioSegment.from_mp3(mp3_path)
wav_data = audio.raw_data
rate = find_closest_sil_supports(audio.frame_rate)
# Convert to PCM_s16
pcm_s16 = audio.set_sample_width(2)
pcm_s16 = pcm_s16.set_frame_rate(rate)
wav_data = pcm_s16.raw_data
silk_data = pysilk.encode(wav_data, data_rate=rate, sample_rate=rate)
# Save the silk file
with open(silk_path, "wb") as f:
+1 -1
View File
@@ -54,7 +54,7 @@ class AzureVoice(Voice):
return reply
def textToVoice(self, text):
fileName = TmpDir().path() + '语音回复_' + str(int(time.time())) + '.wav'
fileName = TmpDir().path() + 'reply-' + str(int(time.time())) + '.wav'
audio_config = speechsdk.AudioConfig(filename=fileName)
speech_synthesizer = speechsdk.SpeechSynthesizer(speech_config=self.speech_config, audio_config=audio_config)
result = speech_synthesizer.speak_text(text)
+1 -1
View File
@@ -80,7 +80,7 @@ class BaiduVoice(Voice):
result = self.client.synthesis(text, self.lang, self.ctp, {
'spd': self.spd, 'pit': self.pit, 'vol': self.vol, 'per': self.per})
if not isinstance(result, dict):
fileName = TmpDir().path() + '语音回复_' + str(int(time.time())) + '.mp3'
fileName = TmpDir().path() + 'reply-' + str(int(time.time())) + '.mp3'
with open(fileName, 'wb') as f:
f.write(result)
logger.info(
+1 -1
View File
@@ -34,7 +34,7 @@ class GoogleVoice(Voice):
return reply
def textToVoice(self, text):
try:
mp3File = TmpDir().path() + '语音回复_' + str(int(time.time())) + '.mp3'
mp3File = TmpDir().path() + 'reply-' + str(int(time.time())) + '.mp3'
tts = gTTS(text=text, lang='zh')
tts.save(mp3File)
logger.info(
+1 -1
View File
@@ -25,7 +25,7 @@ class PyttsVoice(Voice):
def textToVoice(self, text):
try:
wavFile = TmpDir().path() + '语音回复_' + str(int(time.time())) + '.wav'
wavFile = TmpDir().path() + 'reply-' + str(int(time.time())) + '.wav'
self.engine.save_to_file(text, wavFile)
self.engine.runAndWait()
logger.info(