Mirror of https://github.com/fofolee/uTools-quickcommand.git (synced 2025-12-15 07:05:21 +08:00)
Compare commits: master...dependabot (1 commit)

| Author | SHA1 | Date | |
|---|---|---|---|
| | 71c5b70fea | | |
build.sh (4 changed lines)

```diff
@@ -1,4 +0,0 @@
-#!/bin/sh
-git pull
-cd plugin && npm i && cd .. && npm i
-quasar build
```
dev.sh (4 changed lines)

```diff
@@ -1,4 +0,0 @@
-#!/bin/sh
-git pull
-cd plugin && npm i && cd .. && npm i
-quasar dev
```
package-lock.json (generated, 12 changed lines)

```diff
@@ -6251,9 +6251,9 @@
       }
     },
     "node_modules/http-proxy-middleware": {
-      "version": "2.0.7",
-      "resolved": "https://registry.npmjs.org/http-proxy-middleware/-/http-proxy-middleware-2.0.7.tgz",
-      "integrity": "sha512-fgVY8AV7qU7z/MmXJ/rxwbrtQH4jBQ9m7kp3llF0liB7glmFeVZFBepQb32T3y8n8k2+AEYuMPCpinYW+/CuRA==",
+      "version": "2.0.9",
+      "resolved": "https://registry.npmjs.org/http-proxy-middleware/-/http-proxy-middleware-2.0.9.tgz",
+      "integrity": "sha512-c1IyJYLYppU574+YI7R4QyX2ystMtVXZwIdzazUIPIJsHuWNd+mho2j+bKoHftndicGj9yh+xjd+l0yj7VeT1Q==",
       "dev": true,
       "license": "MIT",
       "dependencies": {
@@ -15697,9 +15697,9 @@
       }
     },
     "http-proxy-middleware": {
-      "version": "2.0.7",
-      "resolved": "https://registry.npmjs.org/http-proxy-middleware/-/http-proxy-middleware-2.0.7.tgz",
-      "integrity": "sha512-fgVY8AV7qU7z/MmXJ/rxwbrtQH4jBQ9m7kp3llF0liB7glmFeVZFBepQb32T3y8n8k2+AEYuMPCpinYW+/CuRA==",
+      "version": "2.0.9",
+      "resolved": "https://registry.npmjs.org/http-proxy-middleware/-/http-proxy-middleware-2.0.9.tgz",
+      "integrity": "sha512-c1IyJYLYppU574+YI7R4QyX2ystMtVXZwIdzazUIPIJsHuWNd+mho2j+bKoHftndicGj9yh+xjd+l0yj7VeT1Q==",
       "dev": true,
       "requires": {
         "@types/http-proxy": "^1.17.8",
```
plugin/lib/ai.js (144 changed lines)
```diff
@@ -17,7 +17,6 @@ window.aiResponseParser = (content) => {
 const API_TYPES = {
   OPENAI: "openai",
   OLLAMA: "ollama",
-  UTOOLS: "utools",
 };
 
 // 角色提示词
@@ -123,7 +122,7 @@ function buildRequestData(content, apiConfig) {
   const roleMessage = rolePrompt
     ? [
         {
-          role: "system",
+          role: "user",
          content: rolePrompt,
        },
      ]
```
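The hunk above only flips whether the role prompt is sent as a `system` or a `user` message. A minimal sketch of the resulting messages array for an OpenAI-compatible chat endpoint is shown below; `buildMessages`, `rolePrompt`, and `userPrompt` are illustrative names, not the plugin's own:

```js
// Sketch only: shape of the messages array an OpenAI-compatible
// /chat/completions endpoint accepts, with the role prompt sent either
// as a system message or as a plain user message.
function buildMessages(rolePrompt, userPrompt, { asSystem = false } = {}) {
  const roleMessage = rolePrompt
    ? [{ role: asSystem ? "system" : "user", content: rolePrompt }]
    : [];
  return [...roleMessage, { role: "user", content: userPrompt }];
}

console.log(buildMessages("Answer in JavaScript only.", "Reverse a string."));
// [
//   { role: 'user', content: 'Answer in JavaScript only.' },
//   { role: 'user', content: 'Reverse a string.' }
// ]
```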
```diff
@@ -152,6 +151,21 @@ function buildRequestData(content, apiConfig) {
   };
 }
 
+// 处理普通响应
+function parseResponse(response, apiType) {
+  if (apiType === API_TYPES.OPENAI) {
+    if (!response.data.choices || !response.data.choices[0]) {
+      throw new Error("OpenAI 响应格式错误");
+    }
+    return response.data.choices[0].message.content;
+  } else {
+    if (!response.data.message) {
+      throw new Error("Ollama 响应格式错误");
+    }
+    return response.data.message.content;
+  }
+}
+
 // 处理模型列表响应
 function parseModelsResponse(response, apiType) {
   if (apiType === API_TYPES.OPENAI) {
```
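For reference, the two response shapes that `parseResponse` distinguishes look roughly like the mock objects below. The field names follow the public OpenAI and Ollama chat APIs; the `{ data: ... }` wrapper mirrors how the parsed body is passed around in the code above:

```js
// Mock bodies in the two shapes parseResponse distinguishes.
const openaiLike = {
  data: { choices: [{ message: { role: "assistant", content: "Hi from OpenAI" } }] },
};
const ollamaLike = {
  data: { message: { role: "assistant", content: "Hi from Ollama" } },
};

const pick = (response, apiType) =>
  apiType === "openai"
    ? response.data.choices[0].message.content
    : response.data.message.content;

console.log(pick(openaiLike, "openai")); // "Hi from OpenAI"
console.log(pick(ollamaLike, "ollama")); // "Hi from Ollama"
```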
```diff
@@ -167,24 +181,6 @@ function parseModelsResponse(response, apiType) {
   }
 }
 
-let reasoning_content_start = false;
-function processContentWithReason(response, onStream) {
-  if (response.reasoning_content) {
-    if (!reasoning_content_start) {
-      reasoning_content_start = true;
-      onStream("<think>", false);
-    }
-    onStream(response.reasoning_content, false);
-  }
-  if (response.content) {
-    if (reasoning_content_start) {
-      reasoning_content_start = false;
-      onStream("</think>", false);
-    }
-    onStream(response.content, false);
-  }
-}
-
 // 处理 OpenAI 流式响应
 async function handleOpenAIStreamResponse(line, onStream) {
   if (line.startsWith("data:")) {
```
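The removed `processContentWithReason` helper fences a reasoning model's `reasoning_content` stream in `<think>`/`</think>` markers so downstream rendering can tell reasoning apart from the final answer. A standalone sketch of the same idea, fed with two fake deltas:

```js
// Standalone sketch of the <think> wrapping idea used by the removed helper.
let inReasoning = false;
function wrapReasoning(delta, onStream) {
  if (delta.reasoning_content) {
    if (!inReasoning) {
      inReasoning = true;
      onStream("<think>");
    }
    onStream(delta.reasoning_content);
  }
  if (delta.content) {
    if (inReasoning) {
      inReasoning = false;
      onStream("</think>");
    }
    onStream(delta.content);
  }
}

let out = "";
[{ reasoning_content: "check edge cases" }, { content: "Done." }].forEach((d) =>
  wrapReasoning(d, (chunk) => (out += chunk))
);
console.log(out); // "<think>check edge cases</think>Done."
```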
```diff
@@ -194,9 +190,9 @@ async function handleOpenAIStreamResponse(line, onStream) {
       return;
     }
     const json = JSON.parse(jsonStr);
-    const response = json.choices[0]?.delta;
-    if (response) {
-      processContentWithReason(response, onStream);
+    const content = json.choices[0]?.delta?.content;
+    if (content) {
+      onStream(content, false);
     }
   }
 }
```
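Both variants sit inside the same Server-Sent-Events parsing pattern: strip the `data:` prefix, stop on `[DONE]`, then read `choices[0].delta`. A minimal sketch of handling one such line (simplified, no reasoning handling):

```js
// Sketch of handling one SSE line from an OpenAI-style streaming endpoint.
function handleSSELine(line, onStream) {
  if (!line.startsWith("data:")) return;
  const jsonStr = line.slice("data:".length).trim();
  if (jsonStr === "[DONE]") {
    onStream("", true); // signal completion
    return;
  }
  const delta = JSON.parse(jsonStr).choices[0]?.delta;
  if (delta?.content) onStream(delta.content, false);
}

handleSSELine('data: {"choices":[{"delta":{"content":"Hel"}}]}', console.log); // "Hel" false
handleSSELine("data: [DONE]", console.log); // "" true
```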
```diff
@@ -208,30 +204,13 @@ async function handleOllamaStreamResponse(line, onStream) {
     onStream("", true);
     return;
   }
-  const response = json.message;
-  if (response) {
-    processContentWithReason(response, onStream);
+  if (json.message?.content) {
+    onStream(json.message.content, false);
   }
 }
-
-// 处理 uTools AI 流式响应
-async function handleUToolsAIStreamResponse(response, onStream) {
-  processContentWithReason(response, onStream);
-}
 
 // 处理流式响应
 async function handleStreamResponse(response, apiConfig, onStream) {
-  // 处理 uTools AI 响应
-  if (apiConfig.apiType === API_TYPES.UTOOLS) {
-    try {
-      await handleUToolsAIStreamResponse(response, onStream);
-      return { success: true };
-    } catch (error) {
-      throw error;
-    }
-  }
-
-  // 处理其他 API 的流式响应
   const reader = response.body.getReader();
   const decoder = new TextDecoder();
   let buffer = "";
```
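In both versions, `handleStreamResponse` ends up in the same generic read loop: pull bytes from the fetch body, decode incrementally, and hand complete lines to a per-API handler (newline-delimited JSON for Ollama, `data:` SSE lines for OpenAI). A sketch of that loop, assuming a `handleLine` callback:

```js
// Generic sketch of the read loop handleStreamResponse builds on.
async function pumpStream(response, handleLine) {
  const reader = response.body.getReader();
  const decoder = new TextDecoder();
  let buffer = "";
  while (true) {
    const { done, value } = await reader.read();
    if (done) break;
    buffer += decoder.decode(value, { stream: true });
    const lines = buffer.split("\n");
    buffer = lines.pop(); // keep the trailing partial line for the next chunk
    for (const line of lines) {
      if (line.trim()) handleLine(line);
    }
  }
  if (buffer.trim()) handleLine(buffer); // flush whatever is left
}
```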
```diff
@@ -304,27 +283,24 @@ async function chat(content, apiConfig, options = {}) {
   } = options;
 
   // 验证必要参数
-  if (apiConfig.apiType === API_TYPES.UTOOLS) {
-    if (!content.prompt || !apiConfig.model) {
-      throw new Error("模型名称和提示词不能为空");
-    }
-  } else {
-    if (!apiConfig.apiUrl) {
-      throw new Error("API地址不能为空");
-    }
-    if (!apiConfig.apiUrl || !content.prompt || !apiConfig.model) {
-      throw new Error("API地址、模型名称和提示词不能为空");
-    }
+  if (!apiConfig.apiUrl || !content.prompt || !apiConfig.model) {
+    throw new Error("API地址、模型名称和提示词不能为空");
   }
 
-  let controller;
+  // 构建请求URL和配置
+  const url = buildApiUrl(
+    apiConfig.apiUrl,
+    API_ENDPOINTS[apiConfig.apiType].chat
+  );
+  const config = buildRequestConfig(apiConfig);
+  const requestData = buildRequestData(content, apiConfig);
 
   // 显示进度条
   const processBar = showProcessBar
     ? await quickcommand.showProcessBar({
         text: "AI思考中...",
         onClose: () => {
-          if (typeof controller !== "undefined") {
+          if (controller) {
             controller.abort();
           }
         },
```
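The cancellation wiring around the progress bar is the same in both versions: the dialog's `onClose` aborts the in-flight request through an `AbortController`. A hedged sketch of that pattern; the `quickcommand.showProcessBar` / `updateProcessBar` options used here are only the ones visible in this diff (`{ text, onClose }` and `{ text, complete }`), anything beyond that would be an assumption:

```js
// Sketch: abortable request tied to a cancellable progress dialog.
async function chatWithCancel(url, requestData, headers) {
  const controller = new AbortController();
  const processBar = await quickcommand.showProcessBar({
    text: "AI思考中...",
    onClose: () => controller.abort(), // closing the dialog cancels the request
  });
  try {
    const response = await fetch(url, {
      method: "POST",
      headers,
      body: JSON.stringify(requestData),
      signal: controller.signal,
    });
    return await response.json();
  } catch (error) {
    if (error.name === "AbortError") return { success: false, cancelled: true };
    throw error;
  } finally {
    quickcommand.updateProcessBar({ text: "AI响应完成", complete: true }, processBar);
  }
}
```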
```diff
@@ -351,65 +327,11 @@ async function chat(content, apiConfig, options = {}) {
     onStream(chunk, isDone);
   };
 
-  // 处理 uTools AI 请求
-  if (apiConfig.apiType === API_TYPES.UTOOLS) {
-    try {
-      const messages = buildRequestData(content, apiConfig).messages;
-      controller = utools.ai(
-        {
-          model: apiConfig.model,
-          messages: messages,
-        },
-        (chunk) => {
-          handleUToolsAIStreamResponse(chunk, streamHandler);
-        }
-      );
-      onFetch(controller);
-
-      await controller;
-
-      // 在流式响应完全结束后,发送一个空字符串表示结束
-      streamHandler("", true);
-
-      // 完成时更新进度条并关闭
-      if (processBar) {
-        quickcommand.updateProcessBar(
-          {
-            text: "AI响应完成",
-            complete: true,
-          },
-          processBar
-        );
-      }
-
-      return {
-        success: true,
-        result: fullResponse,
-      };
-    } catch (error) {
-      if (error.name === "AbortError") {
-        return {
-          success: false,
-          error: "请求已取消",
-          cancelled: true,
-        };
-      }
-      throw error;
-    }
-  }
-
-  // 统一使用 fetch 处理其他 API 请求
-  controller = new AbortController();
+  // 统一使用 fetch 处理请求
+  const controller = new AbortController();
 
   onFetch(controller);
 
-  const url = buildApiUrl(
-    apiConfig.apiUrl,
-    API_ENDPOINTS[apiConfig.apiType].chat
-  );
-  const config = buildRequestConfig(apiConfig);
-  const requestData = buildRequestData(content, apiConfig);
-
   const response = await fetch(url, {
     method: "POST",
     headers: config.headers,
```
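The removed branch drives uTools' built-in AI instead of `fetch`. Going by the type declarations removed later in this same diff, `utools.ai` takes `{ model, messages }` plus an optional streaming callback and returns a promise-like whose `abort()` cancels the call. A reduced usage sketch (not the plugin's exact code):

```js
// Sketch of a streaming utools.ai call, following the AiOption/Message typings
// shown later in this diff. The model name is optional (deepseek-v3 by default
// per the typings' comment).
async function askUtoolsAi(model, messages, onChunk) {
  const request = utools.ai({ model, messages }, (chunk) => {
    // each chunk may carry content and/or reasoning_content
    if (chunk.content) onChunk(chunk.content);
  });
  // request.abort() could be called here to cancel the call
  await request;
}

// Usage sketch:
// askUtoolsAi("deepseek-v3", [{ role: "user", content: "你好" }], console.log);
```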
```diff
@@ -520,10 +520,6 @@ document.addEventListener("DOMContentLoaded", () => {
       if (dialogType === "textarea" && !e.ctrlKey) {
         return;
       }
-      // select 类型有自己的键盘处理器,不需要全局处理器处理 Enter 键
-      if (dialogType === "select") {
-        return;
-      }
       document.getElementById("ok-btn").click();
     }
   });
```
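For context, the surrounding handler implements a global Enter-to-confirm rule: textarea dialogs only confirm on Ctrl+Enter, and the removed branch let select dialogs opt out because they handle Enter themselves. A sketch of that pattern; where `dialogType` comes from is an assumption here (illustrated with a `data-` attribute), only the checks and the `#ok-btn` click are taken from the diff:

```js
// Sketch of the global Enter-to-confirm handler this hunk adjusts.
document.addEventListener("keydown", (e) => {
  if (e.key !== "Enter") return;
  const dialogType = document.body.dataset.dialogType; // illustrative source
  if (dialogType === "textarea" && !e.ctrlKey) return; // textarea needs Ctrl+Enter
  if (dialogType === "select") return; // select has its own keyboard handler
  document.getElementById("ok-btn")?.click();
});
```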
```diff
@@ -173,10 +173,10 @@ export default defineComponent({
     } else {
       let option =
         command.program === "custom"
-          ? command.customOptions || {}
-          : this.programs[command.program] || {};
-      option.scptarg = command.scptarg || "";
-      option.charset = command.charset || {};
+          ? command.customOptions
+          : this.programs[command.program];
+      option.scptarg = command.scptarg;
+      option.charset = command.charset;
       window.runCodeFile(
         commandCode,
         option,
@@ -170,8 +170,8 @@ export default {
     getCommandOpt(command) {
       let option =
         command.program === "custom"
-          ? command.customOptions || {}
-          : programs[command.program] || {};
+          ? command.customOptions
+          : programs[command.program];
       option.scptarg = command.scptarg || "";
       option.charset = command.charset || {};
       option.envPath = this.$root.nativeProfile.envPath.trim() || "";
```
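The only difference in these two hunks is the `|| {}` / `|| ""` defaulting. The fallbacks matter because a command can lack `customOptions` (or an unknown `program`), in which case the lookup yields `undefined` and the following property assignments throw. A small self-contained illustration with made-up data:

```js
// Why the "|| {}" fallbacks matter (mock command/programs data).
const command = { program: "custom", scptarg: "-v" }; // no customOptions
const programs = { python: { bin: "python3" } };

const unsafe =
  command.program === "custom" ? command.customOptions : programs[command.program];
// unsafe.scptarg = command.scptarg;  // TypeError: cannot set property on undefined

const safe =
  command.program === "custom"
    ? command.customOptions || {}
    : programs[command.program] || {};
safe.scptarg = command.scptarg || "";
safe.charset = command.charset || {};
console.log(safe); // { scptarg: '-v', charset: {} }
```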
```diff
@@ -213,7 +213,7 @@ export default defineComponent({
       const response = await window.quickcommand.askAI(
         {
           prompt: promptText,
-          context: [presetContext, ...this.chatHistory.slice(0, -2)],
+          context: [...presetContext, ...this.chatHistory.slice(0, -2)],
         },
         this.selectedApi,
         {
```
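The one-character difference is whether `presetContext` is nested as a single element or spread into the context list. Since (in the array-returning variant of `getPresetContext` further down) it is itself an array of messages, it has to be spread to yield a flat message list:

```js
// Nesting vs spreading an array of preset messages into the chat context.
const presetContext = [
  { role: "user", content: "Only answer with code." },
  { role: "assistant", content: "OK." },
];
const chatHistory = [{ role: "user", content: "Sort a list." }];

const nested = [presetContext, ...chatHistory];    // length 2, first item is an array
const spread = [...presetContext, ...chatHistory]; // length 3, flat message list
console.log(nested.length, Array.isArray(nested[0])); // 2 true
console.log(spread.length, spread[0].role);           // 3 "user"
```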
```diff
@@ -291,7 +291,7 @@ export default defineComponent({
         shell: "liunx shell脚本",
       };
       const languageName = languageMap[language] || language;
-      const commonInstructions = `接下来所有的对话中的需求都请通过编写${languageName}代码来实现,并请遵循以下原则:
+      const commonInstructions = `接下来我所有的对话中的需求都请通过编写${languageName}代码来实现,并请遵循以下原则:
 - 编写简洁、可读性强的代码
 - 遵循${languageName}最佳实践和设计模式
 - 使用恰当的命名规范和代码组织
@@ -312,7 +312,7 @@ export default defineComponent({
       const specificInstructions = languageSpecific[language] || "";
 
       const lastInstructions =
-        "\n请直接提供MARKDOWN格式的代码(以```脚本语言开头,以```结尾),任何情况下都不需要做解释和说明";
+        "\n请直接给我MARKDOWN格式的代码(以```脚本语言开头,以```结尾),任何情况下都不需要做解释和说明";
 
       return commonInstructions + specificInstructions + lastInstructions;
     },
```
```diff
@@ -330,26 +330,48 @@ export default defineComponent({
       ];
     },
     getPresetContext() {
-      let finnalPrompt = ""
-
       const languagePrompt = this.getLanguagePrompt(this.language);
 
-      finnalPrompt += languagePrompt;
+      let presetContext = [
+        {
+          role: "user",
+          content: languagePrompt,
+        },
+        {
+          role: "assistant",
+          content: "好的,我会严格按照你的要求编写代码。",
+        },
+      ];
 
       if (this.submitDocs && this.language === "quickcommand") {
         const docs = this.getLanguageDocs(this.language);
 
-        finnalPrompt += `\n你现在使用的是一种特殊的环境,支持uTools和quickcommand两种特殊的接口,请优先使用uTools和quickcommand接口解决需求,然后再使用当前语言通用的解决方案`;
+        presetContext.push(
+          {
+            role: "user",
+            content: `你现在使用的是一种特殊的环境,支持uTools和quickcommand两种特殊的接口,请优先使用uTools和quickcommand接口解决需求,然后再使用当前语言通用的解决方案`,
+          },
+          {
+            role: "assistant",
+            content: "好的,我会注意。",
+          }
+        );
 
         docs.forEach((doc) => {
-          finnalPrompt += `\n这是${doc.name}的API文档:\n${doc.api}`;
+          presetContext.push(
+            {
+              role: "user",
+              content: `这是${doc.name}的API文档:\n${doc.api}`,
+            },
+            {
+              role: "assistant",
+              content: "好的,我会认真学习并记住这些接口。",
+            }
+          );
         });
       }
 
-      return {
-        role: "system",
-        content: finnalPrompt,
-      };
+      return presetContext;
     },
     openAIAssistantHelp() {
       window.showUb.help("#KUCwm");
```
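The two sides of this hunk build the preset context in two different shapes: one concatenates everything into a single `system` message, the other seeds the conversation with paired `user`/`assistant` turns that can later be spread into the chat history. A minimal sketch of both shapes:

```js
// The two return shapes of getPresetContext seen in this diff.
const languagePrompt = "Answer every request with JavaScript code.";

// Shape A: one system message built from a concatenated prompt.
const asSystemMessage = { role: "system", content: languagePrompt };

// Shape B: seeded turns, ready for [...presetContext, ...chatHistory].
const asPresetTurns = [
  { role: "user", content: languagePrompt },
  { role: "assistant", content: "好的,我会严格按照你的要求编写代码。" },
];

console.log([asSystemMessage].length, asPresetTurns.length); // 1 2
```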
```diff
@@ -2,20 +2,25 @@
   <q-card style="width: 800px" class="q-pa-sm">
     <div class="text-h5 q-my-md q-px-sm">API配置</div>
     <div>
-      <div class="q-pa-sm row q-gutter-sm">
-        <q-btn
-          v-for="option in aiOptions"
-          :key="option.value"
-          icon="add_link"
-          dense
-          color="primary"
-          :label="option.label"
-          @click="addModel(option.value)"
+      <div class="flex q-mb-md q-px-sm" style="height: 26px">
+        <ButtonGroup
+          v-model="apiToAdd"
+          class="col"
+          :options="[
+            { label: 'OPENAI', value: 'openai' },
+            { label: 'OLLAMA', value: 'ollama' },
+          ]"
+          height="26px"
+        />
+        <q-icon
+          name="add_box"
+          @click="addModel"
+          color="primary"
+          size="26px"
+          class="cursor-pointer q-ml-sm"
         />
       </div>
       <q-scroll-area
         ref="scrollArea"
         :style="`height: ${getConfigListHeight()}px;`"
         class="q-px-sm"
         :vertical-thumb-style="{
@@ -75,7 +80,6 @@
             ? '例:https://api.openai.com'
             : '例:http://localhost:11434'
         "
-        v-show="aiConfig.apiType !== 'utools'"
       >
         <template v-slot:prepend>
           <q-badge
```
```diff
@@ -175,33 +179,21 @@
 </template>
 
 <script>
-import { defineComponent, ref } from "vue";
+import { defineComponent } from "vue";
 import { dbManager } from "js/utools.js";
+import ButtonGroup from "components/composer/common/ButtonGroup.vue";
 import draggable from "vuedraggable";
 import { getUniqueId } from "js/common/uuid.js";
 
 export default defineComponent({
   name: "AIConfig",
   components: {
+    ButtonGroup,
     draggable,
   },
-  setup() {
-    const initAiOptions = utools.allAiModels
-      ? [{ label: "uTools内置AI", value: "utools" }]
-      : [];
-
-    const aiOptions = ref([
-      ...initAiOptions,
-      { label: "OPENAI接口(需Key)", value: "openai" },
-      { label: "OLLAMA接口", value: "ollama" },
-    ]);
-
-    return {
-      aiOptions,
-    };
-  },
   data() {
     return {
+      apiToAdd: "openai",
       aiConfigs: [],
       models: [],
       tokenInputTypes: [],
```
```diff
@@ -210,19 +202,6 @@ export default defineComponent({
   emits: ["save"],
   methods: {
     async getModels(aiConfig) {
-      if (aiConfig.apiType === "utools") {
-        try {
-          const models = await utools.allAiModels();
-          this.models = models.map((model) => model.id);
-        } catch (error) {
-          quickcommand.showMessageBox(
-            "获取 uTools AI 模型失败: " + error.message,
-            "error"
-          );
-          this.models = [];
-        }
-        return;
-      }
       const { success, result, error } = await window.getModelsFromAiApi(
         aiConfig
       );
```
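The removed uTools branch of `getModels` relies on `utools.allAiModels()`, which (per the type declarations later in this diff) resolves to `AiModel` objects with `id`, `label`, `description`, `icon`, and `cost`; only the ids are kept for the model dropdown. A reduced sketch of that call, using `console.error` instead of the component's message box to stay self-contained:

```js
// Sketch of listing uTools AI model ids, per the AiModel typings in this diff.
async function listUtoolsModelIds() {
  try {
    const models = await utools.allAiModels();
    return models.map((model) => model.id); // keep only the ids for a dropdown
  } catch (error) {
    console.error("获取 uTools AI 模型失败: " + error.message);
    return [];
  }
}
```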
```diff
@@ -243,22 +222,15 @@ export default defineComponent({
     deleteModel(index) {
       this.aiConfigs.splice(index, 1);
     },
-    addModel(apiType) {
-      const defaultConfig = {
+    addModel() {
+      this.aiConfigs.push({
         id: getUniqueId(),
-        apiType: apiType,
+        apiType: this.apiToAdd,
         apiUrl: "",
         apiToken: "",
         model: "",
         name: "",
-      };
-
-      if (apiType === "utools") {
-        defaultConfig.apiUrl = "";
-      }
-
-      this.aiConfigs.unshift(defaultConfig);
-
+      });
     },
     getConfigListHeight() {
       const counts = Math.min(this.aiConfigs.length, 3);
```
```diff
@@ -46,7 +46,7 @@ export const controlCommands = {
       {
         label: "结束",
         value: "end",
-        codeTemplate: "};",
+        codeTemplate: "}",
       },
     ],
   },
@@ -112,7 +112,7 @@ export const controlCommands = {
       {
         label: "结束",
         value: "end",
-        codeTemplate: "};",
+        codeTemplate: "}",
       },
     ],
   },
@@ -169,7 +169,7 @@ export const controlCommands = {
      {
        label: "结束",
        value: "end",
-        codeTemplate: "};",
+        codeTemplate: "}",
      },
    ],
  },
@@ -225,7 +225,7 @@ export const controlCommands = {
      {
        label: "结束",
        value: "end",
-        codeTemplate: "};",
+        codeTemplate: "}",
      },
    ],
  },
@@ -266,7 +266,7 @@ export const controlCommands = {
      {
        label: "结束",
        value: "end",
-        codeTemplate: "};",
+        codeTemplate: "}",
      },
    ],
  },
@@ -320,7 +320,7 @@ export const controlCommands = {
      {
        label: "结束",
        value: "end",
-        codeTemplate: "};",
+        codeTemplate: "}",
      },
    ],
  },
@@ -360,7 +360,7 @@ export const controlCommands = {
      {
        label: "结束",
        value: "end",
-        codeTemplate: "};",
+        codeTemplate: "}",
      },
    ],
  },
```
```diff
@@ -124,7 +124,7 @@ export function generateCode(flow) {
       if (cmd.asyncMode === "await") {
         cmdCode = `await ${cmdCode}`;
       }
-      code.push(indent + cmdCode + (cmd.isControlFlow ? "" : comma));
+      code.push(indent + cmdCode + comma);
     }
   });
 
```
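The repeated `codeTemplate: "};"` vs `"}"` change and this `isControlFlow ? "" : comma` tweak interact in the generated script: on the left-hand side of these hunks, control-flow commands contribute bare brace templates and only ordinary commands get the statement terminator appended. A tiny sketch of that generation step with made-up flow data:

```js
// Sketch: how the control-flow flag suppresses the trailing terminator.
const comma = ";";
const flow = [
  { code: "if (x > 0) {", isControlFlow: true },
  { code: 'console.log("positive")', isControlFlow: false },
  { code: "}", isControlFlow: true }, // "结束" with codeTemplate "}" rather than "};"
];
const lines = flow.map((cmd) => cmd.code + (cmd.isControlFlow ? "" : comma));
console.log(lines.join("\n"));
// if (x > 0) {
// console.log("positive");
// }
```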
```diff
@@ -70,6 +70,8 @@ export function generateUBrowserCode(argvs) {
   // 添加其他操作
   if (argvs.operations?.length) {
     argvs.operations.forEach(({ value, args }) => {
+      if (!args?.length) return;
+
       const stringifiedArgs = args
         .map((arg) => stringifyArgv(arg))
         .filter(Boolean);
```
```diff
@@ -491,13 +491,13 @@ export const ubrowserOperationConfigs = {
     ],
   },
   setValue: {
-    value: "value",
+    value: "setValue",
     label: "设置值",
     icon: "check_box",
     config: [
       {
         label: "元素选择器",
-        icon: "find_in_page",
+        icon: "varInput",
         component: "VariableInput",
         width: 6,
       },
```
src/plugins/monaco/types/utools.api.d.ts (vendored, 128 changed lines)
```diff
@@ -667,134 +667,6 @@ interface UToolsApi {
   isLinux(): boolean;
 
   ubrowser: UBrowser;
-
-  /**
-   * 调用 AI 能力,支持 Function Calling
-   * @param option AI 选项
-   * @param streamCallback 流式调用函数 (可选)
-   * @returns 返回定制的 PromiseLike
-   */
-  ai(option: AiOption): PromiseLike<Message>;
-  ai(
-    option: AiOption,
-    streamCallback: (chunk: Message) => void
-  ): PromiseLike<void>;
-
-  /**
-   * 获取所有 AI 模型
-   * @returns 返回 AI 模型数组
-   */
-  allAiModels(): Promise<AiModel[]>;
 }
-
-/**
- * AI 选项接口
- */
-interface AiOption {
-  /**
-   * AI 模型, 为空默认使用 deepseek-v3
-   */
-  model?: string;
-  /**
-   * 消息列表
-   */
-  messages: Message[];
-  /**
-   * 工具列表
-   */
-  tools?: Tool[];
-}
-
-/**
- * AI 消息接口
- */
-interface Message {
-  /**
-   * 消息角色
-   * system:系统消息
-   * user:用户消息
-   * assistant:AI 消息
-   */
-  role: "system" | "user" | "assistant";
-  /**
-   * 消息内容
-   */
-  content?: string;
-  /**
-   * 消息推理内容,一般只有推理模型会返回
-   */
-  reasoning_content?: string;
-}
-
-/**
- * AI 工具接口
- */
-interface Tool {
-  /**
-   * 工具类型
-   * function:函数工具
-   */
-  type: "function";
-  /**
-   * 函数工具配置
-   */
-  function?: {
-    /**
-     * 函数名称
-     */
-    name: string;
-    /**
-     * 函数描述
-     */
-    description: string;
-    /**
-     * 函数参数
-     */
-    parameters: {
-      type: "object";
-      properties: Record<string, any>;
-    };
-    /**
-     * 必填参数
-     */
-    required?: string[];
-  };
-}
-
-/**
- * AI 模型接口
- */
-interface AiModel {
-  /**
-   * AI 模型 ID,用于 utools.ai 调用的 model 参数
-   */
-  id: string;
-  /**
-   * AI 模型名称
-   */
-  label: string;
-  /**
-   * AI 模型描述
-   */
-  description: string;
-  /**
-   * AI 模型图标
-   */
-  icon: string;
-  /**
-   * AI 模型调用消耗
-   */
-  cost: number;
-}
-
-/**
- * Promise 扩展类型,包含 abort() 函数
- */
-interface PromiseLike<T> extends Promise<T> {
-  /**
-   * 中止 AI 调用
-   */
-  abort(): void;
-}
 
 declare var utools: UToolsApi;
```
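Assembled purely from the type declarations removed above, a usage sketch of `utools.ai` with Function Calling: `tools` entries carry a `function` descriptor (`name`, `description`, `parameters`, optional `required`), the stream callback receives `Message` chunks, and the returned promise-like exposes `abort()`. The weather tool itself is a made-up example, not part of the plugin:

```js
// Usage sketch following the removed AiOption/Tool/Message typings.
const request = utools.ai(
  {
    // model omitted: defaults to deepseek-v3 per the typings' comment
    messages: [{ role: "user", content: "北京今天天气怎么样?" }],
    tools: [
      {
        type: "function",
        function: {
          name: "get_weather", // hypothetical tool for illustration
          description: "查询指定城市的天气",
          parameters: {
            type: "object",
            properties: { city: { type: "string" } },
          },
          required: ["city"],
        },
      },
    ],
  },
  (chunk) => {
    if (chunk.reasoning_content) console.log("thinking:", chunk.reasoning_content);
    if (chunk.content) console.log(chunk.content);
  }
);

// request.abort(); // would cancel the in-flight call
```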