Add an AI assistant to the code editor, with support for AI-assisted code writing

fofolee 2025-02-20 01:04:11 +08:00
parent ac19d06845
commit d82c735cfe
14 changed files with 1033 additions and 325 deletions

package-lock.json generated
View File

@ -11,6 +11,7 @@
"@quasar/extras": "^1.14.0",
"core-js": "^3.6.5",
"croner": "^4.3.9",
"dompurify": "^3.2.4",
"marked": "^15.0.7",
"monaco-editor": "^0.33.0",
"monaco-editor-webpack-plugin": "^7.0.1",
@ -2488,6 +2489,13 @@
"@types/node": "*"
}
},
"node_modules/@types/trusted-types": {
"version": "2.0.7",
"resolved": "https://registry.npmmirror.com/@types/trusted-types/-/trusted-types-2.0.7.tgz",
"integrity": "sha512-ScaPdn1dQczgbl0QFTeTOmVHFULt394XJgOQNoyVhZ6r2vLnMLJfBPd53SB52T/3G36VI1/g2MZaX0cwDuXsfw==",
"license": "MIT",
"optional": true
},
"node_modules/@types/webpack-bundle-analyzer": {
"version": "4.7.0",
"resolved": "https://registry.npmjs.org/@types/webpack-bundle-analyzer/-/webpack-bundle-analyzer-4.7.0.tgz",
@ -4625,6 +4633,15 @@
"url": "https://github.com/fb55/domhandler?sponsor=1"
}
},
"node_modules/dompurify": {
"version": "3.2.4",
"resolved": "https://registry.npmmirror.com/dompurify/-/dompurify-3.2.4.tgz",
"integrity": "sha512-ysFSFEDVduQpyhzAob/kkuJjf5zWkZD8/A9ywSp1byueyuCfHamrCBa14/Oc2iiB0e51B+NpxSl5gmzn+Ms/mg==",
"license": "(MPL-2.0 OR Apache-2.0)",
"optionalDependencies": {
"@types/trusted-types": "^2.0.7"
}
},
"node_modules/domutils": {
"version": "2.8.0",
"resolved": "https://registry.npmjs.org/domutils/-/domutils-2.8.0.tgz",
@ -13085,6 +13102,12 @@
"@types/node": "*"
}
},
"@types/trusted-types": {
"version": "2.0.7",
"resolved": "https://registry.npmmirror.com/@types/trusted-types/-/trusted-types-2.0.7.tgz",
"integrity": "sha512-ScaPdn1dQczgbl0QFTeTOmVHFULt394XJgOQNoyVhZ6r2vLnMLJfBPd53SB52T/3G36VI1/g2MZaX0cwDuXsfw==",
"optional": true
},
"@types/webpack-bundle-analyzer": {
"version": "4.7.0",
"resolved": "https://registry.npmjs.org/@types/webpack-bundle-analyzer/-/webpack-bundle-analyzer-4.7.0.tgz",
@ -14597,6 +14620,14 @@
"domelementtype": "^2.2.0"
}
},
"dompurify": {
"version": "3.2.4",
"resolved": "https://registry.npmmirror.com/dompurify/-/dompurify-3.2.4.tgz",
"integrity": "sha512-ysFSFEDVduQpyhzAob/kkuJjf5zWkZD8/A9ywSp1byueyuCfHamrCBa14/Oc2iiB0e51B+NpxSl5gmzn+Ms/mg==",
"requires": {
"@types/trusted-types": "^2.0.7"
}
},
"domutils": {
"version": "2.8.0",
"resolved": "https://registry.npmjs.org/domutils/-/domutils-2.8.0.tgz",

View File

@ -14,6 +14,7 @@
"@quasar/extras": "^1.14.0",
"core-js": "^3.6.5",
"croner": "^4.3.9",
"dompurify": "^3.2.4",
"marked": "^15.0.7",
"monaco-editor": "^0.33.0",
"monaco-editor-webpack-plugin": "^7.0.1",

View File

@ -1,155 +1,303 @@
const axios = require("axios");
// 支持的模型类型
const MODEL_TYPES = {
const API_TYPES = {
OPENAI: "openai",
OLLAMA: "ollama",
};
// 预设提示词
const PRESET_PROMPTS = {
// 角色提示词
const ROLE_PROMPTS = {
// 翻译
translate: `请将以下内容翻译成地道的中文,要求:
1. 保持原文的专业性和准确性
2. 符合中文的表达习惯
3. 对于专业术语保留英文原文并在括号中给出中文翻译
4. 保持原文的段落格式
原文`,
translate: `你是一名翻译专家,请将我给你的内容进行翻译,要求:
1. 无论给的内容长短请直接翻译不要进行任何解释
2. 提供中文时翻译成地道的英文符合英文的表达习惯
3. 提供英文时翻译成地道的中文符合中文的表达习惯
4. 保持原文的专业性和准确性
5. 对于专业术语保留原文并在括号中给出对应的中文翻译
6. 保持原文的段落格式
`,
// 生成SHELL命令
shell: `请根据以下描述生成一个 shell 命令,要求:
shell: `你是一名shell命令专家请根据我的描述生成 shell 命令,要求:
1. 命令应当简洁高效
2. 优先使用常见的命令行工具
3. 确保命令的安全性和可靠性
4. 对于复杂操作添加注释说明
5. 如果需要多个命令使用 && 连接或使用脚本格式
6. 直接输出命令不要输出任何解释不要使用markdown格式
需求描述`,
`,
// 总结
summarize: `请总结以下内容的要点,要求:
summarize: `你是一名总结专家,请总结我给你的内容的要点,要求:
1. 提取最重要和最有价值的信息
2. 使用简洁的语言
3. 按重要性排序
4. 保持逻辑性和连贯性
5. 如果有专业术语保留并解释
原文`,
`,
};
// API URL 处理
const API_ENDPOINTS = {
[API_TYPES.OPENAI]: {
chat: "/v1/chat/completions",
models: "/v1/models",
},
[API_TYPES.OLLAMA]: {
chat: "/api/chat",
models: "/api/tags",
},
};
// 构建API URL
function buildApiUrl(baseUrl, endpoint) {
if (!baseUrl.endsWith(endpoint)) {
return baseUrl.replace(/\/?$/, endpoint);
}
return baseUrl;
}
// 构建请求配置
function buildRequestConfig(apiConfig) {
const config = {
headers: {
"Content-Type": "application/json",
},
};
if (apiConfig.apiType === API_TYPES.OPENAI && apiConfig.apiToken) {
config.headers["Authorization"] = `Bearer ${apiConfig.apiToken}`;
}
return config;
}
// 构建请求数据
function buildRequestData(content, apiConfig, stream = false) {
const { model } = apiConfig;
const { prompt, role, context = [] } = content;
const rolePrompt = ROLE_PROMPTS[role] || role;
const roleMessage = rolePrompt
? [
{
role: "user",
content: rolePrompt,
},
]
: [];
// 统一的消息格式处理
const messages = [
// 添加系统角色消息(如果有)
...roleMessage,
// 添加上下文消息
...context.map((msg) => ({
role: msg.role || "user",
content: msg.content,
})),
// 添加当前用户消息
{
role: "user",
content: prompt,
},
];
return {
model,
messages,
stream,
};
}
// 处理普通响应
function parseResponse(response, apiType) {
if (apiType === API_TYPES.OPENAI) {
if (!response.data.choices || !response.data.choices[0]) {
throw new Error("OpenAI 响应格式错误");
}
return response.data.choices[0].message.content;
} else {
if (!response.data.message) {
throw new Error("Ollama 响应格式错误");
}
return response.data.message.content;
}
}
// 处理模型列表响应
function parseModelsResponse(response, apiType) {
if (apiType === API_TYPES.OPENAI) {
if (!response.data.data) {
throw new Error("OpenAI 响应格式错误");
}
return response.data.data.map((model) => model.id);
} else {
if (!response.data.models) {
throw new Error("Ollama 响应格式错误");
}
return response.data.models.map((model) => model.name);
}
}
// 处理 OpenAI 流式响应
async function handleOpenAIStreamResponse(line, controller, onStream) {
if (line.startsWith("data: ")) {
const jsonStr = line.replace(/^data: /, "");
if (jsonStr === "[DONE]") {
onStream("", controller, true);
return;
}
const json = JSON.parse(jsonStr);
const content = json.choices[0]?.delta?.content;
if (content) {
onStream(content, controller, false);
}
}
}
// 处理 Ollama 流式响应
async function handleOllamaStreamResponse(line, controller, onStream) {
const json = JSON.parse(line);
if (json.done) {
onStream("", controller, true);
return;
}
if (json.message?.content) {
onStream(json.message.content, controller, false);
}
}
// 处理流式响应
async function handleStreamResponse(response, apiConfig, controller, onStream) {
const reader = response.body.getReader();
const decoder = new TextDecoder();
let buffer = "";
try {
while (true) {
const { value, done } = await reader.read();
if (done) break;
buffer += decoder.decode(value, { stream: true });
const lines = buffer.split("\n");
buffer = lines.pop() || "";
for (const line of lines) {
if (line.trim()) {
try {
if (apiConfig.apiType === API_TYPES.OPENAI) {
await handleOpenAIStreamResponse(line, controller, onStream);
} else {
await handleOllamaStreamResponse(line, controller, onStream);
}
} catch (e) {
console.error("解析响应失败:", e);
}
}
}
}
// 处理剩余的缓冲区
if (buffer.trim()) {
try {
if (apiConfig.apiType === API_TYPES.OPENAI) {
await handleOpenAIStreamResponse(buffer, controller, onStream);
} else {
await handleOllamaStreamResponse(buffer, controller, onStream);
}
} catch (e) {
console.error("解析剩余响应失败:", e);
}
}
} catch (error) {
if (error.name === "AbortError") {
return {
success: false,
error: "请求已取消",
cancelled: true,
};
}
throw error;
} finally {
reader.releaseLock();
}
return { success: true, result: "流式请求完成" };
}
/**
* AI对话功能
* @param {Object} apiConfig - API配置参数
* @param {string} apiConfig.modelType - 模型类型(openai/ollama)
* @param {string} apiConfig.apiUrl - API地址
* @param {string} apiConfig.apiToken - API令牌
* @param {string} apiConfig.model - 模型名称
* @param {Object} content - 对话内容参数
* @param {string} content.prompt - 用户输入的提示词
* @param {string} content.presetPrompt - 预设提示词类型
* @param {Object} apiConfig - API配置参数
* @param {Object} options - 其他选项
* @returns {Promise<Object>} 对话响应
*/
async function chat(content, apiConfig) {
async function chat(content, apiConfig, options = {}) {
try {
const { modelType, apiUrl, apiToken, model } = apiConfig;
const { prompt, presetPrompt } = content;
const { showLoadingBar = true, stream = false, onStream } = options;
// 验证必要参数
if (!apiUrl || !prompt || !model) {
if (!apiConfig.apiUrl || !content.prompt || !apiConfig.model) {
throw new Error("API地址、模型名称和提示词不能为空");
}
// 构建完整提示词
const fullPrompt = presetPrompt
? `${PRESET_PROMPTS[presetPrompt]}\n${prompt}`
: prompt;
if (stream && !onStream) {
throw new Error("使用流式请求时必须提供onStream回调函数");
}
// 准备请求配置
const config = {
headers: {
"Content-Type": "application/json",
},
};
// 构建请求URL和配置
const url = buildApiUrl(
apiConfig.apiUrl,
API_ENDPOINTS[apiConfig.apiType].chat
);
const config = buildRequestConfig(apiConfig, stream);
const requestData = buildRequestData(content, apiConfig, stream);
let requestData;
let url = apiUrl;
// 根据不同的模型类型构建请求数据
if (modelType === MODEL_TYPES.OPENAI) {
// OpenAI API
config.headers["Authorization"] = `Bearer ${apiToken}`;
requestData = {
model: model,
messages: [
{
role: "user",
content: fullPrompt,
// 显示加载条
const loadingBar = showLoadingBar
? await quickcommand.showLoadingBar({
text: "AI思考中...",
onClose: () => {
if (controller) {
controller.abort();
}
},
],
};
} else if (modelType === MODEL_TYPES.OLLAMA) {
// Ollama API
// 如果用户没有指定完整的 API 路径,添加 /api/generate
if (!url.endsWith("/api/generate")) {
url = url.replace(/\/?$/, "/api/generate");
}
})
: null;
requestData = {
model: model,
prompt: fullPrompt,
stream: false,
};
} else {
throw new Error("不支持的模型类型");
// 统一使用 fetch 处理请求
const controller = new AbortController();
const response = await fetch(url, {
method: "POST",
headers: config.headers,
body: JSON.stringify(requestData),
signal: controller.signal,
});
if (!response.ok) {
throw new Error(`HTTP error! status: ${response.status}`);
}
const loadingBar = await quickcommand.showLoadingBar({
text: "AI思考中...",
onClose: () => {
// 取消请求
if (source) {
source.cancel("操作已取消");
}
},
});
// 创建取消令牌
const CancelToken = axios.CancelToken;
const source = CancelToken.source();
// 发送请求
const response = await axios.post(url, requestData, {
...config,
cancelToken: source.token,
});
loadingBar.close();
// 解析不同模型的响应
let result;
if (modelType === MODEL_TYPES.OPENAI) {
// OpenAI 响应格式
if (!response.data.choices || !response.data.choices[0]) {
throw new Error("OpenAI 响应格式错误");
}
result = response.data.choices[0].message.content;
if (stream) {
result = await handleStreamResponse(
response,
apiConfig,
controller,
onStream
);
} else {
// Ollama 响应格式
if (!response.data.response) {
throw new Error("Ollama 响应格式错误");
}
result = response.data.response;
const responseData = await response.json();
result = {
success: true,
result: parseResponse({ data: responseData }, apiConfig.apiType),
};
}
return {
success: true,
result,
};
loadingBar?.close();
return result;
} catch (error) {
// 如果是用户取消的请求,返回特定的错误信息
if (axios.isCancel(error)) {
if (error.name === "AbortError") {
return {
success: false,
error: "请求已取消",
@ -166,69 +314,33 @@ async function chat(content, apiConfig) {
/**
* 获取API支持的模型列表
* @param {Object} apiConfig - API配置参数
* @param {string} apiConfig.modelType - 模型类型(openai/ollama)
* @param {string} apiConfig.apiUrl - API地址
* @param {string} apiConfig.apiToken - API令牌
* @returns {Promise<Object>} 模型列表响应
*/
async function getModels(apiConfig) {
try {
const { modelType, apiUrl, apiToken } = apiConfig;
// 验证必要参数
if (!apiUrl) {
if (!apiConfig.apiUrl) {
throw new Error("API地址不能为空");
}
// 准备请求配置
const config = {
headers: {
"Content-Type": "application/json",
},
};
const url = buildApiUrl(
apiConfig.apiUrl,
API_ENDPOINTS[apiConfig.apiType].models
);
const config = buildRequestConfig(apiConfig);
let url = apiUrl;
const response = await fetch(url, {
method: "GET",
headers: config.headers,
});
// 根据不同的模型类型构建请求
if (modelType === MODEL_TYPES.OPENAI) {
// OpenAI API
config.headers["Authorization"] = `Bearer ${apiToken}`;
// OpenAI的模型列表接口是 /v1/models
if (!url.endsWith("/models")) {
url = "https://api.openai.com/v1/models";
}
} else if (modelType === MODEL_TYPES.OLLAMA) {
// Ollama API
// Ollama的模型列表接口是 /api/tags
if (!url.endsWith("/api/tags")) {
url = url.replace(/\/?$/, "/api/tags");
}
} else {
throw new Error("不支持的模型类型");
}
// 发送请求
const response = await axios.get(url, config);
// 解析不同模型的响应
let models;
if (modelType === MODEL_TYPES.OPENAI) {
// OpenAI 响应格式
if (!response.data.data) {
throw new Error("OpenAI 响应格式错误");
}
models = response.data.data.map((model) => model.id);
} else {
// Ollama 响应格式
if (!response.data.models) {
throw new Error("Ollama 响应格式错误");
}
models = response.data.models.map((model) => model.name);
if (!response.ok) {
throw new Error(`HTTP error! status: ${response.status}`);
}
const responseData = await response.json();
return {
success: true,
result: models,
result: parseModelsResponse({ data: responseData }, apiConfig.apiType),
};
} catch (error) {
return {

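For reference, a minimal usage sketch of the reworked service, which is exposed to scripts as quickcommand.askAI (see the preload change below). This is a sketch only: the URL, model and prompt are placeholders, and it must run in an async context.

const response = await quickcommand.askAI(
  {
    prompt: "Explain what a Promise is in one sentence", // user prompt
    role: "", // or "translate" / "shell" / "summarize"
  },
  {
    apiType: "ollama", // "openai" or "ollama"
    apiUrl: "http://localhost:11434", // "/api/chat" is appended by buildApiUrl
    model: "qwen2.5:32b",
  },
  {
    showLoadingBar: false,
    stream: true,
    onStream: (text, controller, done) => {
      if (text) console.log(text); // incremental chunk
      if (done) console.log("--- stream finished ---");
      // controller.abort() cancels the in-flight request
    },
  }
);
if (!response.success && !response.cancelled) {
  console.error(response.error);
}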
View File

@ -188,8 +188,8 @@ const quickcommand = {
return null;
},
askAI: async function (content, apiConfig) {
return await chat(content, apiConfig);
askAI: async function (content, apiConfig, options) {
return await chat(content, apiConfig, options);
},
...systemDialog,

View File

@ -21,7 +21,6 @@
v-if="!isRunCodePage"
v-model="commandManager.state.currentCommand"
from="quickcommand"
@update:is-expanded="isConfigExpanded = $event"
:expand-on-focus="true"
class="command-config"
/>
@ -97,7 +96,6 @@ export default {
programLanguages: Object.keys(programs),
showComposer: false,
listener: null,
isConfigExpanded: false,
composerInfo: {
program: "quickcomposer",
},

View File

@ -0,0 +1,90 @@
<template>
<div>
<q-select
v-if="apiOptions.length > 0"
:model-value="modelValue"
@update:model-value="updateModelValue($event)"
:options="apiOptions"
map-options
emit-value
dense
options-dense
filled
>
<template v-slot:prepend>
<q-badge color="primary" text-color="white" class="q-mr-sm q-pa-xs">
模型
</q-badge>
</template>
<template v-slot:append>
<q-btn icon="settings" dense flat @click.stop="showAIConfig = true" />
</template>
</q-select>
<q-btn
dense
color="primary"
class="full-width q-px-sm"
icon="settings"
label="配置AI接口"
unelevated
v-else
@click="showAIConfig = true"
/>
<q-dialog v-model="showAIConfig">
<AIConfig @save="onAIConfigSave" />
</q-dialog>
</div>
</template>
<script>
import AIConfig from "components/popup/AIConfig.vue";
import { dbManager } from "js/utools.js";
import { defineComponent } from "vue";
export default defineComponent({
components: { AIConfig },
emits: ["update:modelValue"],
props: {
modelValue: {
type: Object,
required: true,
},
},
data() {
return {
showAIConfig: false,
apiOptions: [],
};
},
methods: {
onAIConfigSave() {
this.apiOptions = this.getApiOptions();
const newApiConfig = this.apiOptions.find(
(option) => option.value.id === this.modelValue.id
);
const newModelValue =
newApiConfig?.value || this.apiOptions[0].value || {};
this.updateModelValue({ ...newModelValue });
},
updateModelValue(value) {
this.$emit("update:modelValue", value);
},
getApiOptions() {
const apiConfigs = dbManager.getStorage("cfg_aiConfigs");
if (!apiConfigs) return [];
return apiConfigs.map((config) => {
return {
label: config.name,
value: config,
};
});
},
},
mounted() {
this.apiOptions = this.getApiOptions();
if (!this.modelValue.id) {
this.updateModelValue(this.apiOptions[0]?.value || {});
}
},
});
</script>

View File

@ -1,42 +1,26 @@
<template>
<div>
<q-select
v-if="apiOptions.length > 0"
:model-value="argvs.apiConfig"
@update:model-value="updateArgvs('apiConfig', $event)"
:options="apiOptions"
map-options
emit-value
dense
options-dense
filled
label="API模型"
class="q-mb-sm"
/>
<q-field filled dense v-else class="q-mb-sm">
<template #control>
<div class="flex items-center justify-center full-width text-warning">
<q-icon name="warning" class="q-mr-sm" />
<div>
未配置API模型配置方法命令配置界面-右下角菜单按钮-API配置
</div>
</div>
</template>
</q-field>
<ButtonGroup
:model-value="argvs.content.presetPrompt"
@update:modelValue="updateArgvs('content.presetPrompt', $event)"
:options="presetPromptOptions"
height="26px"
class="q-mb-sm"
/>
<VariableInput
:model-value="argvs.content.prompt"
@update:modelValue="updateArgvs('content.prompt', $event)"
label="提示词"
type="textarea"
autogrow
/>
<div class="q-pt-sm">
<AISelector
:model-value="argvs.apiConfig"
@update:modelValue="updateArgvs('apiConfig', $event)"
class="q-mb-sm"
/>
<ButtonGroup
:model-value="argvs.content.role"
@update:modelValue="updateArgvs('content.role', $event)"
:options="roleOptions"
height="26px"
class="q-mb-sm"
/>
<VariableInput
:model-value="argvs.content.prompt"
@update:modelValue="updateArgvs('content.prompt', $event)"
label="提示词"
type="textarea"
autogrow
/>
</div>
</div>
</template>
@ -46,8 +30,7 @@ import ButtonGroup from "components/composer/common/ButtonGroup.vue";
import { newVarInputVal } from "js/composer/varInputValManager";
import VariableInput from "components/composer/common/VariableInput.vue";
import { parseFunction, stringifyArgv } from "js/composer/formatString";
import { dbManager } from "js/utools.js";
import AISelector from "components/ai/AISelector.vue";
export default defineComponent({
name: "AskAIEditor",
props: {
@ -56,25 +39,26 @@ export default defineComponent({
components: {
VariableInput,
ButtonGroup,
AISelector,
},
emits: ["update:modelValue"],
data() {
return {
showAIConfig: false,
defaultArgvs: {
content: {
prompt: newVarInputVal("str"),
presetPrompt: "",
role: "",
},
apiConfig: {},
},
apiOptions: [],
presetPromptOptions: [
{ label: "自由问答", value: "" },
roleOptions: [
{ label: "无", value: "" },
{ label: "翻译", value: "translate" },
{ label: "总结", value: "summarize" },
{ label: "执行shell命令", value: "shell" },
{ label: "生成shell命令", value: "shell" },
],
modelTypeOptions: [
apiTypeOptions: [
{ label: "OpenAI", value: "openai" },
{ label: "Ollama", value: "ollama" },
],
@ -125,23 +109,6 @@ export default defineComponent({
});
},
},
mounted() {
const apiConfigs = dbManager.getStorage("cfg_aiConfigs");
this.apiOptions = apiConfigs
? apiConfigs.map((config) => {
return {
label: config.name,
value: config,
};
})
: [];
this.defaultArgvs.apiConfig = apiConfigs?.[0] || {};
const argvs = this.modelValue.argvs || this.defaultArgvs;
if (!this.modelValue.code) {
this.updateModelValue(argvs);
}
},
});
</script>

View File

@ -0,0 +1,404 @@
<template>
<q-card class="ai-dialog">
<div class="header q-px-md q-py-sm">
<q-icon name="smart_toy" size="24px" />
<div class="text-h6">AI 助手</div>
<AISelector v-model="selectedApi" />
<q-space />
<q-btn icon="close" flat round dense v-close-popup size="md" />
</div>
<!-- 聊天记录区域 -->
<q-scroll-area
ref="scrollArea"
class="chat-container"
:vertical-thumb-style="{
width: '5px',
}"
>
<div class="chat-history q-px-md">
<div
v-for="(message, index) in chatHistory"
:key="index"
class="chat-message-wrapper"
>
<div :class="['chat-message', message.role]">
<div class="avatar">
<q-avatar size="28px">
<q-icon
:name="message.role === 'user' ? 'person' : 'smart_toy'"
:color="message.role === 'user' ? 'white' : 'primary'"
size="20px"
/>
</q-avatar>
</div>
<div class="message-bubble">
<div
v-if="message.role === 'assistant'"
class="message-content markdown"
v-html="getTrimContent(message.content)"
/>
<div v-else class="message-content" v-text="message.content" />
</div>
</div>
</div>
</div>
</q-scroll-area>
<!-- 输入区域 -->
<div class="input-container q-px-md q-py-sm">
<q-input
v-model="prompt"
type="textarea"
filled
dense
autogrow
autofocus
:max-rows="3"
placeholder="请描述你的需求Enter 发送Shift+Enter 换行"
@keydown.enter.exact.prevent="handleSubmit"
@keydown.shift.enter.prevent="prompt += '\n'"
>
<template v-slot:append>
<div class="row items-center q-gutter-x-md">
<q-btn
flat
icon="delete_sweep"
size="sm"
dense
:disable="chatHistory.length === 0"
@click="clearHistory"
>
<q-tooltip>清空对话</q-tooltip>
</q-btn>
<q-btn
@click="autoUpdateCode = !autoUpdateCode"
:color="autoUpdateCode ? 'primary' : 'grey'"
icon="auto_fix_high"
size="sm"
dense
flat
>
<q-tooltip>
{{
autoUpdateCode
? "自动更新代码(已开启)"
: "自动更新代码(已关闭)"
}}
</q-tooltip>
</q-btn>
<q-btn
:color="streamingResponse ? 'negative' : 'primary'"
:icon="streamingResponse ? 'stop' : 'send'"
size="sm"
dense
flat
@click="handleSubmit"
/>
</div>
</template>
</q-input>
</div>
</q-card>
</template>
<script>
import { defineComponent } from "vue";
import AISelector from "components/ai/AISelector.vue";
import { marked } from "marked";
import DOMPurify from "dompurify";
const quickcommandApi =
require(`!raw-loader!plugins/monaco/types/quickcommand.api.d.ts`)
.default.replace(/\/\*[\s\S]*?\*\//g, "")
.replace(/\n/g, "");
const uToolsApi = require(`!raw-loader!plugins/monaco/types/utools.api.d.ts`)
.default.replace(/\/\*[\s\S]*?\*\//g, "")
.replace(/\n/g, "");
export default defineComponent({
name: "AIAssistantDialog",
components: {
AISelector,
},
data() {
return {
prompt: "",
selectedApi: {},
streamingResponse: false,
chatHistory: [],
currentRequest: null,
autoUpdateCode: true,
scrollToBottomDebounce: null,
};
},
props: {
code: {
type: String,
default: "",
},
language: {
type: String,
default: "",
},
},
emits: ["update-code"],
methods: {
scrollToBottom() {
// 防抖:清除尚未执行的滚动
if (this.scrollToBottomDebounce) {
clearTimeout(this.scrollToBottomDebounce);
}
// 延迟滚动,等待内容渲染完成
this.scrollToBottomDebounce = setTimeout(() => {
const scrollArea = this.$refs.scrollArea;
if (scrollArea) {
const scrollTarget = scrollArea.getScrollTarget();
scrollArea.setScrollPosition(
"vertical",
scrollTarget.scrollHeight,
300
);
}
}, 100);
},
async handleSubmit() {
if (this.streamingResponse) {
this.stopStreaming();
return;
}
const promptText = this.prompt.trim();
if (!promptText || !this.selectedApi) return;
// 添加用户消息和助手占位消息
this.chatHistory.push(
{
role: "user",
content: promptText,
},
{
role: "assistant",
content: "",
}
);
// 滚动到底部
this.$nextTick(() => {
this.scrollToBottom();
});
this.streamingResponse = true;
this.prompt = ""; // 清空输入框
try {
const response = await window.quickcommand.askAI(
{
prompt: promptText,
role: this.getRolePrompt(this.language),
context: this.chatHistory.slice(0, -2),
},
this.selectedApi,
{
showLoadingBar: false,
stream: true,
onStream: (text, controller, done) => {
this.currentRequest = controller;
if (text) {
this.chatHistory[this.chatHistory.length - 1].content += text;
this.$nextTick(() => {
this.scrollToBottom();
});
}
if (done) {
this.streamingResponse = false;
if (this.autoUpdateCode) {
const response =
this.chatHistory[this.chatHistory.length - 1].content;
const code = response.match(
/```[a-z]*\n([\s\S]*?)\n```/
)?.[1];
if (!code) return;
this.$emit("update-code", code);
}
}
},
}
);
if (!response.success && !response.cancelled) {
window.quickcommand.showMessageBox(response.error, "error");
}
} catch (error) {
window.quickcommand.showMessageBox(error.message, "error");
this.streamingResponse = false;
}
},
stopStreaming() {
this.streamingResponse = false;
if (this.currentRequest) {
this.currentRequest.abort();
this.currentRequest = null;
}
},
clearHistory() {
this.chatHistory = [];
},
getTrimContent(content) {
const markedContent = marked(content.trim());
// 调整 think 标签与 <p> 标签的嵌套顺序
const processedContent = markedContent
.replace("<p><think>", "<think><p>")
.replace("</think></p>", "</p></think>")
// 移除空的 think 标签
.replace("<think>\n\n</think>", "");
const purifiedContent = DOMPurify.sanitize(processedContent, {
ADD_TAGS: ["think"],
});
return purifiedContent;
},
getRolePrompt(language) {
const languageMap = {
quickcommand: "NodeJS",
javascript: "NodeJS",
};
const commonInstructions = `请作为一名专业的开发专家,根据我的需求编写${languageMap[language]}代码,并请遵循以下原则:
- 编写简洁可读性强的代码
- 遵循${language}最佳实践和设计模式
- 使用恰当的命名规范和代码组织
- 添加必要的错误处理和边界检查
- 保持中文注释的准确性和专业性
- 提供必要的使用说明
`;
// 语言特定的要求
let languageSpecific = {
javascript: `- 优先使用现代ES6+特性
- 使用NodeJS原生API和模块`,
python: `- 遵循PEP8规范`,
};
languageSpecific.quickcommand = `${languageSpecific.javascript}
- 支持使用以下uTools接口 ${uToolsApi}
- 支持使用以下quickcommand接口 ${quickcommandApi}`;
// 使用对应语言的特定要求(若有)
const specificInstructions =
languageSpecific[language.toLowerCase()] || "";
const lastInstructions =
"\n请直接生成代码任何情况下都不需要做解释和说明";
return commonInstructions + specificInstructions + lastInstructions;
},
},
});
</script>
<style scoped>
.ai-dialog {
width: 800px;
max-width: 90vw;
height: 80vh;
display: flex;
flex-direction: column;
}
.header {
display: flex;
align-items: center;
gap: 8px;
}
.chat-container {
flex: 1 1 auto;
}
.chat-message-wrapper {
margin-bottom: 1rem;
animation: fadeIn 0.3s ease-in-out;
}
.chat-message {
display: flex;
gap: 8px;
max-width: 85%;
}
.chat-message.user {
margin-left: auto;
flex-direction: row-reverse;
}
.chat-message .avatar {
background: var(--q-primary);
border-radius: 50%;
padding: 2px;
width: 32px;
height: 32px;
flex-shrink: 0;
display: flex;
align-items: center;
justify-content: center;
}
.chat-message.assistant .avatar {
background: var(--transparent-bg-color);
box-shadow: 0 1px 3px rgba(0, 0, 0, 0.1);
}
.chat-message.user .avatar {
background: var(--q-primary);
}
.message-bubble {
padding: 8px 12px;
border-radius: 12px;
position: relative;
}
.message-content :deep(think) {
color: #8b8b8b;
display: block;
border-left: 4px solid #8b8b8b;
padding-left: 10px;
margin-bottom: 8px;
font-size: 12px;
}
.chat-message.user .message-bubble {
background-color: var(--q-primary);
color: white;
border-top-right-radius: 4px;
box-shadow: 0 2px 8px rgba(0, 0, 0, 0.15);
}
.chat-message.assistant .message-bubble {
background-color: var(--transparent-bg-color);
border-top-left-radius: 4px;
box-shadow: 0 2px 8px rgba(0, 0, 0, 0.1);
}
.message-content {
font-size: 13px;
}
@keyframes fadeIn {
from {
opacity: 0;
transform: translateY(10px);
}
to {
opacity: 1;
transform: translateY(0);
}
}
/* 暗色模式适配 */
.body--dark .chat-message.assistant .message-bubble {
box-shadow: 0 2px 8px rgba(0, 0, 0, 0.3);
}
</style>
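The auto-update flow above lifts the first fenced code block out of the assistant reply before emitting update-code. A standalone sketch of that extraction, using the same regular expression (the sample reply is illustrative):

function extractFirstCodeBlock(reply) {
  // optional language tag after the opening fence, then the block body
  const match = reply.match(/```[a-z]*\n([\s\S]*?)\n```/);
  return match ? match[1] : null;
}

const sampleReply = "Here is the code:\n```js\nconsole.log('hello');\n```\nDone.";
console.log(extractFirstCodeBlock(sampleReply)); // -> "console.log('hello');"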

View File

@ -6,6 +6,26 @@
{{ placeholder }}
</div>
</div>
<!-- AI助手按钮 -->
<div class="ai-button-wrapper">
<q-btn
round
dense
color="primary"
icon="smart_toy"
@click="showAIDialog = true"
>
<q-tooltip>AI 助手</q-tooltip>
</q-btn>
</div>
<!-- AI对话框 -->
<q-dialog v-model="showAIDialog" position="right" seamless>
<AIAssistantDialog
:code="modelValue"
:language="language"
@update-code="setEditorValue"
/>
</q-dialog>
</div>
</template>
@ -13,6 +33,7 @@
import * as monaco from "monaco-editor";
import importAll from "js/common/importAll.js";
import { defineComponent } from "vue";
import AIAssistantDialog from "./AIAssistantDialog.vue";
// 批量导入所有语言的自动补全
let languageCompletions = importAll(
@ -39,6 +60,9 @@ const typeDefinitions = {
export default defineComponent({
name: "CodeEditor",
components: {
AIAssistantDialog,
},
props: {
// v-model
modelValue: {
@ -134,6 +158,7 @@ export default defineComponent({
// 光标样式
cursorStyle: "line",
},
showAIDialog: false,
};
},
watch: {
@ -394,6 +419,9 @@ export default defineComponent({
formatDocument() {
editor.getAction("editor.action.formatDocument").run();
},
setEditorValue(value) {
editor.setValue(value);
},
},
computed: {
showPlaceholder() {
@ -431,4 +459,11 @@ export default defineComponent({
user-select: none;
opacity: 0.4;
}
.ai-button-wrapper {
position: absolute;
right: 30px;
bottom: 30px;
z-index: 500;
}
</style>

View File

@ -1,7 +1,6 @@
<template>
<q-expansion-item
v-model="isExpanded"
@update:model-value="$emit('update:is-expanded', $event)"
class="command-config"
expand-icon-toggle
>
@ -188,7 +187,7 @@ export default defineComponent({
default: "quickcommand",
},
},
emits: ["update:modelValue", "update:is-expanded"],
emits: ["update:modelValue"],
data() {
return {
commandManager: useCommandManager(),
@ -303,7 +302,6 @@ export default defineComponent({
},
updateExpanded(value) {
this.isExpanded = value;
this.$emit("update:is-expanded", value);
},
},
});

View File

@ -4,7 +4,7 @@
<div>
<div class="flex q-mb-md q-px-sm" style="height: 26px">
<ButtonGroup
v-model="modelToAdd"
v-model="apiToAdd"
class="col"
:options="[
{ label: 'OPENAI', value: 'openai' },
@ -27,93 +27,122 @@
width: '2px',
}"
>
<div class="config-list">
<div
v-for="(aiConfig, index) in aiConfigs"
:key="index"
class="config-item"
>
<div class="row q-col-gutter-sm">
<q-input
filled
dense
v-model="aiConfig.name"
class="col"
placeholder="请输入名称"
>
<template v-slot:prepend>
<q-badge
color="primary"
text-color="black"
label="名称"
class="q-pa-xs"
/>
</template>
<template v-slot:append>
<q-icon
color="grey"
name="remove_circle"
@click="deleteModel(index)"
size="16px"
class="cursor-pointer"
/>
</template>
</q-input>
<q-input
filled
dense
v-model="aiConfig.apiUrl"
class="col-8"
:placeholder="`${aiConfig.modelType} API地址`"
>
<template v-slot:prepend>
<q-badge
color="primary"
text-color="black"
label="接口"
class="q-pa-xs"
/>
</template>
</q-input>
<draggable
v-model="aiConfigs"
item-key="name"
handle=".drag-handle"
:animation="200"
class="config-list"
>
<template #item="{ element: aiConfig, index }">
<div class="config-item">
<div class="config-item-side-bar">
<q-icon
name="drag_indicator"
class="drag-handle cursor-move"
size="20px"
/>
</div>
<div class="config-item-content">
<div class="row q-col-gutter-sm">
<q-input
filled
dense
v-model="aiConfig.name"
class="col"
placeholder="请输入名称"
>
<template v-slot:prepend>
<q-badge
color="primary"
text-color="white"
label="名称"
class="q-pa-xs"
/>
</template>
<template v-slot:append>
<q-icon
color="grey"
name="remove_circle"
@click="deleteModel(index)"
size="16px"
class="cursor-pointer"
/>
</template>
</q-input>
<q-input
filled
dense
v-model="aiConfig.apiUrl"
class="col-7"
:placeholder="`${aiConfig.apiType} API地址`"
>
<template v-slot:prepend>
<q-badge
color="primary"
text-color="white"
label="接口"
class="q-pa-xs"
/>
</template>
</q-input>
</div>
<div class="row q-col-gutter-sm">
<q-input filled dense v-model="aiConfig.model" class="col">
<template v-slot:prepend>
<q-badge
color="primary"
text-color="white"
label="模型"
class="q-pa-xs"
/>
</template>
<template v-slot:append>
<q-btn-dropdown
flat
@click="getModels(aiConfig)"
dense
dropdown-icon="refresh"
>
<q-list>
<q-item
v-for="model in models"
:key="model"
clickable
v-close-popup
@click="aiConfig.model = model"
>
<q-item-section>
{{ model }}
</q-item-section>
</q-item>
</q-list>
</q-btn-dropdown>
<q-tooltip>获取模型</q-tooltip>
</template>
</q-input>
<q-input
filled
dense
v-model="aiConfig.apiToken"
v-if="aiConfig.apiType === 'openai'"
type="password"
class="col-7"
>
<template v-slot:prepend>
<q-badge
color="primary"
text-color="white"
label="令牌"
class="q-pa-xs"
/>
</template>
</q-input>
</div>
</div>
</div>
<div class="row q-col-gutter-sm">
<q-select
filled
dense
v-model="aiConfig.model"
:options="models"
@focus="getModels(aiConfig)"
class="col"
>
<template v-slot:prepend>
<q-badge
color="primary"
text-color="black"
label="模型"
class="q-pa-xs"
/>
</template>
</q-select>
<q-input
filled
dense
v-model="aiConfig.apiToken"
v-if="aiConfig.modelType === 'openai'"
type="password"
class="col-8"
>
<template v-slot:prepend>
<q-badge
color="primary"
text-color="black"
label="令牌"
class="q-pa-xs"
/>
</template>
</q-input>
</div>
</div>
</div>
</template>
</draggable>
</q-scroll-area>
</div>
<div class="flex justify-end q-gutter-sm q-px-sm">
@ -133,19 +162,23 @@
import { defineComponent } from "vue";
import { dbManager } from "js/utools.js";
import ButtonGroup from "components/composer/common/ButtonGroup.vue";
import draggable from "vuedraggable";
import { getUniqueId } from "js/common/uuid.js";
export default defineComponent({
name: "AIConfig",
components: {
ButtonGroup,
draggable,
},
data() {
return {
modelToAdd: "openai",
apiToAdd: "openai",
aiConfigs: [],
models: [],
};
},
emits: ["save"],
methods: {
async getModels(aiConfig) {
const { success, result, error } = await window.getModelsFromAiApi(
@ -153,6 +186,7 @@ export default defineComponent({
);
if (!success) {
quickcommand.showMessageBox(error, "error");
this.models = [];
return;
}
this.models = result;
@ -162,17 +196,19 @@ export default defineComponent({
"cfg_aiConfigs",
window.lodashM.cloneDeep(this.aiConfigs)
);
this.$emit("save");
},
deleteModel(index) {
this.aiConfigs.splice(index, 1);
},
addModel() {
this.aiConfigs.push({
modelType: this.modelToAdd,
apiType: this.apiToAdd,
apiUrl: "",
apiToken: "",
model: "",
name: "",
id: getUniqueId(),
});
},
getConfigListHeight() {
@ -188,7 +224,7 @@ export default defineComponent({
<style scoped>
.config-list,
.config-item {
.config-item-content {
display: flex;
flex-direction: column;
gap: 8px;
@ -198,5 +234,21 @@ export default defineComponent({
border: 1px solid var(--q-primary);
border-radius: 4px;
padding: 8px;
display: flex;
}
.config-item-side-bar {
width: 20px;
padding-top: 8px;
}
.config-item-content {
flex: 1;
}
.drag-handle {
cursor: move;
color: var(--q-primary);
margin-right: 4px;
}
</style>
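For reference, a sketch of the record shape this dialog stores under cfg_aiConfigs and that AISelector reads back as select options; the field values are illustrative:

// one entry of the cfg_aiConfigs array saved via dbManager.setStorage
const exampleAiConfig = {
  id: "b3f1c2d4", // from getUniqueId(), used by AISelector to re-select after saving
  name: "Local Ollama", // shown as the option label
  apiType: "ollama", // "openai" or "ollama"
  apiUrl: "http://localhost:11434", // chat/models endpoints are appended by ai.js
  apiToken: "", // only used when apiType === "openai"
  model: "qwen2.5:32b",
};
// dbManager.setStorage("cfg_aiConfigs", [exampleAiConfig]);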

View File

@ -9,6 +9,8 @@
border-radius: 6px;
overflow-x: auto;
margin: 2px 0;
overflow-x: auto;
max-width: 100%;
}
.markdown code {
@ -74,3 +76,12 @@
border-left-color: #444;
color: #999;
}
.markdown code ::-webkit-scrollbar {
height: 5px;
}
.markdown a {
color: #007bff;
text-decoration: none;
}

View File

@ -1,5 +1,3 @@
import { newVarInputVal } from "js/composer/varInputValManager";
export const aiCommands = {
label: "AI操作",
icon: "smart_toy",

View File

@ -892,16 +892,23 @@ interface quickcommandApi {
/**
* AI对话
* @param content 对话内容
* @param content.prompt 提示词
* @param content.role 预设角色
* @param apiConfig API配置
* @param apiConfig.apiType API类型(openai/ollama)
* @param apiConfig.apiUrl API地址
* @param apiConfig.apiToken API令牌(仅 OpenAI 需要)
* @param apiConfig.model 模型名称
* @param options 其他选项
* @param options.showLoadingBar 是否显示加载条
* @example
* // OpenAI 示例
* const response = await quickcommand.askAI(
* {
* prompt: "你好",
* presetPrompt: "" // 使用预设提示词translate/shell/summarize
* },
* {
* modelType: "openai",
* apiType: "openai",
* apiUrl: "https://api.openai.com/v1/chat/completions",
* apiToken: "your-api-token",
* model: "gpt-3.5-turbo"
@ -912,10 +919,10 @@ interface quickcommandApi {
* const response = await quickcommand.askAI(
* {
* prompt: "查找进程名为chrome的进程并关闭",
* presetPrompt: "shell"
* role: "shell"
* },
* {
* modelType: "ollama",
* apiType: "ollama",
* apiUrl: "http://localhost:11434/api/generate",
* model: "qwen2.5:32b"
* }
@ -925,18 +932,22 @@ interface quickcommandApi {
content: {
/** 提示词 */
prompt: string;
/** 预设提示词类型 */
presetPrompt?: "" | "translate" | "shell" | "summarize";
/** 预设角色 */
role?: "translate" | "shell" | "summarize";
},
apiConfig: {
/** 模型类型openai/ollama */
modelType: "openai" | "ollama";
apiType: "openai" | "ollama";
/** API地址 */
apiUrl: string;
/** API令牌仅 OpenAI 需要) */
apiToken?: string;
/** 模型名称 */
model: string;
},
options?: {
/** 是否显示加载条, 默认 true */
showLoadingBar?: boolean;
}
): Promise<{
/** 是否成功 */