Compare commits

...

1 Commit

Author SHA1 Message Date
YoVinchen
113512165d fix(claude): add gemini_chat format for Gemini-compatible endpoints (#1899) 2026-04-05 17:13:48 +08:00
11 changed files with 128 additions and 14 deletions

View File

@@ -268,6 +268,7 @@ pub struct ProviderMeta {
/// Claude API 格式(仅 Claude 供应商使用) /// Claude API 格式(仅 Claude 供应商使用)
/// - "anthropic": 原生 Anthropic Messages API直接透传 /// - "anthropic": 原生 Anthropic Messages API直接透传
/// - "openai_chat": OpenAI Chat Completions 格式,需要转换 /// - "openai_chat": OpenAI Chat Completions 格式,需要转换
/// - "gemini_chat": Gemini Chat 兼容格式,需要转换,但不注入 prompt_cache_key
/// - "openai_responses": OpenAI Responses API 格式,需要转换 /// - "openai_responses": OpenAI Responses API 格式,需要转换
#[serde(rename = "apiFormat", skip_serializing_if = "Option::is_none")] #[serde(rename = "apiFormat", skip_serializing_if = "Option::is_none")]
pub api_format: Option<String>, pub api_format: Option<String>,
@@ -282,9 +283,9 @@ pub struct ProviderMeta {
/// 是否将 base_url 视为完整 API 端点(不拼接 endpoint 路径) /// 是否将 base_url 视为完整 API 端点(不拼接 endpoint 路径)
#[serde(rename = "isFullUrl", skip_serializing_if = "Option::is_none")] #[serde(rename = "isFullUrl", skip_serializing_if = "Option::is_none")]
pub is_full_url: Option<bool>, pub is_full_url: Option<bool>,
/// Prompt cache key for OpenAI-compatible endpoints. /// Prompt cache key for OpenAI-compatible endpoints that accept it.
/// When set, injected into converted requests to improve cache hit rate. /// When set, injected into converted requests to improve cache hit rate.
/// If not set, provider ID is used automatically during format conversion. /// If not set, provider ID is used automatically during openai_chat/openai_responses conversion.
#[serde(rename = "promptCacheKey", skip_serializing_if = "Option::is_none")] #[serde(rename = "promptCacheKey", skip_serializing_if = "Option::is_none")]
pub prompt_cache_key: Option<String>, pub prompt_cache_key: Option<String>,
/// 累加模式应用中,该 provider 是否已写入 live config。 /// 累加模式应用中,该 provider 是否已写入 live config。

View File

@@ -1544,7 +1544,7 @@ fn rewrite_claude_transform_endpoint(
let target_path = if is_copilot && api_format == "openai_responses" { let target_path = if is_copilot && api_format == "openai_responses" {
"/v1/responses" "/v1/responses"
} else if is_copilot { } else if is_copilot || api_format == "gemini_chat" {
"/chat/completions" "/chat/completions"
} else if api_format == "openai_responses" { } else if api_format == "openai_responses" {
"/v1/responses" "/v1/responses"
@@ -1694,6 +1694,18 @@ mod tests {
assert_eq!(passthrough_query.as_deref(), Some("foo=bar")); assert_eq!(passthrough_query.as_deref(), Some("foo=bar"));
} }
#[test]
fn rewrite_claude_transform_endpoint_uses_gemini_chat_path() {
let (endpoint, passthrough_query) = rewrite_claude_transform_endpoint(
"/v1/messages?beta=true&foo=bar",
"gemini_chat",
false,
);
assert_eq!(endpoint, "/chat/completions?foo=bar");
assert_eq!(passthrough_query.as_deref(), Some("foo=bar"));
}
#[test] #[test]
fn rewrite_claude_transform_endpoint_strips_beta_for_responses() { fn rewrite_claude_transform_endpoint_strips_beta_for_responses() {
let (endpoint, passthrough_query) = rewrite_claude_transform_endpoint( let (endpoint, passthrough_query) = rewrite_claude_transform_endpoint(

View File

@@ -5,6 +5,7 @@
//! ## API 格式 //! ## API 格式
//! - **anthropic** (默认): Anthropic Messages API 格式,直接透传 //! - **anthropic** (默认): Anthropic Messages API 格式,直接透传
//! - **openai_chat**: OpenAI Chat Completions 格式,需要 Anthropic ↔ OpenAI 转换 //! - **openai_chat**: OpenAI Chat Completions 格式,需要 Anthropic ↔ OpenAI 转换
//! - **gemini_chat**: Gemini Chat 兼容格式,走 `/chat/completions`,且不注入 `prompt_cache_key`
//! - **openai_responses**: OpenAI Responses API 格式,需要 Anthropic ↔ Responses 转换 //! - **openai_responses**: OpenAI Responses API 格式,需要 Anthropic ↔ Responses 转换
//! //!
//! ## 认证模式 //! ## 认证模式
@@ -27,6 +28,7 @@ pub fn get_claude_api_format(provider: &Provider) -> &'static str {
if let Some(api_format) = meta.api_format.as_deref() { if let Some(api_format) = meta.api_format.as_deref() {
return match api_format { return match api_format {
"openai_chat" => "openai_chat", "openai_chat" => "openai_chat",
"gemini_chat" => "gemini_chat",
"openai_responses" => "openai_responses", "openai_responses" => "openai_responses",
_ => "anthropic", _ => "anthropic",
}; };
@@ -41,6 +43,7 @@ pub fn get_claude_api_format(provider: &Provider) -> &'static str {
{ {
return match api_format { return match api_format {
"openai_chat" => "openai_chat", "openai_chat" => "openai_chat",
"gemini_chat" => "gemini_chat",
"openai_responses" => "openai_responses", "openai_responses" => "openai_responses",
_ => "anthropic", _ => "anthropic",
}; };
@@ -66,7 +69,10 @@ pub fn get_claude_api_format(provider: &Provider) -> &'static str {
} }
pub fn claude_api_format_needs_transform(api_format: &str) -> bool { pub fn claude_api_format_needs_transform(api_format: &str) -> bool {
matches!(api_format, "openai_chat" | "openai_responses") matches!(
api_format,
"openai_chat" | "gemini_chat" | "openai_responses"
)
} }
pub fn transform_claude_request_for_api_format( pub fn transform_claude_request_for_api_format(
@@ -85,6 +91,7 @@ pub fn transform_claude_request_for_api_format(
super::transform_responses::anthropic_to_responses(body, Some(cache_key)) super::transform_responses::anthropic_to_responses(body, Some(cache_key))
} }
"openai_chat" => super::transform::anthropic_to_openai(body, Some(cache_key)), "openai_chat" => super::transform::anthropic_to_openai(body, Some(cache_key)),
"gemini_chat" => super::transform::anthropic_to_openai(body, None),
_ => Ok(body), _ => Ok(body),
} }
} }
@@ -155,6 +162,7 @@ impl ClaudeAdapter {
/// 从 provider.meta.api_format 读取格式设置: /// 从 provider.meta.api_format 读取格式设置:
/// - "anthropic" (默认): Anthropic Messages API 格式,直接透传 /// - "anthropic" (默认): Anthropic Messages API 格式,直接透传
/// - "openai_chat": OpenAI Chat Completions 格式,需要格式转换 /// - "openai_chat": OpenAI Chat Completions 格式,需要格式转换
/// - "gemini_chat": Gemini Chat 兼容格式,需要格式转换,但不注入 prompt_cache_key
/// - "openai_responses": OpenAI Responses API 格式,需要格式转换 /// - "openai_responses": OpenAI Responses API 格式,需要格式转换
fn get_api_format(&self, provider: &Provider) -> &'static str { fn get_api_format(&self, provider: &Provider) -> &'static str {
get_claude_api_format(provider) get_claude_api_format(provider)
@@ -415,10 +423,11 @@ impl ProviderAdapter for ClaudeAdapter {
// 根据 api_format 配置决定是否需要格式转换 // 根据 api_format 配置决定是否需要格式转换
// - "anthropic" (默认): 直接透传,无需转换 // - "anthropic" (默认): 直接透传,无需转换
// - "openai_chat": 需要 Anthropic ↔ OpenAI Chat Completions 格式转换 // - "openai_chat": 需要 Anthropic ↔ OpenAI Chat Completions 格式转换
// - "gemini_chat": 需要 Anthropic ↔ Gemini Chat 兼容格式转换(不注入 prompt_cache_key // - "gemini_chat": 需要 Anthropic ↔ Gemini Chat 兼容格式转换(不注入 prompt_cache_key
// - "openai_responses": 需要 Anthropic ↔ OpenAI Responses API 格式转换 // - "openai_responses": 需要 Anthropic ↔ OpenAI Responses API 格式转换
matches!( matches!(
self.get_api_format(provider), self.get_api_format(provider),
"openai_chat" | "openai_responses" "openai_chat" | "gemini_chat" | "openai_responses"
) )
} }
@@ -726,6 +735,20 @@ mod tests {
); );
assert!(adapter.needs_transform(&openai_chat_provider)); assert!(adapter.needs_transform(&openai_chat_provider));
// Gemini Chat format in meta: needs transform
let gemini_chat_provider = create_provider_with_meta(
json!({
"env": {
"ANTHROPIC_BASE_URL": "https://generativelanguage.googleapis.com/v1beta/openai"
}
}),
ProviderMeta {
api_format: Some("gemini_chat".to_string()),
..Default::default()
},
);
assert!(adapter.needs_transform(&gemini_chat_provider));
// OpenAI Responses format in meta: needs transform // OpenAI Responses format in meta: needs transform
let openai_responses_provider = create_provider_with_meta( let openai_responses_provider = create_provider_with_meta(
json!({ json!({
@@ -854,4 +877,31 @@ mod tests {
assert!(transformed.get("input").is_some()); assert!(transformed.get("input").is_some());
assert!(transformed.get("max_output_tokens").is_some()); assert!(transformed.get("max_output_tokens").is_some());
} }
#[test]
fn test_transform_claude_request_for_api_format_gemini_chat_omits_prompt_cache_key() {
let provider = create_provider_with_meta(
json!({
"env": {
"ANTHROPIC_BASE_URL": "https://generativelanguage.googleapis.com/v1beta/openai"
}
}),
ProviderMeta {
prompt_cache_key: Some("custom-cache-key".to_string()),
..Default::default()
},
);
let body = json!({
"model": "gemini-2.5-flash",
"messages": [{ "role": "user", "content": "hello" }],
"max_tokens": 128
});
let transformed =
transform_claude_request_for_api_format(body, &provider, "gemini_chat").unwrap();
assert_eq!(transformed["model"], "gemini-2.5-flash");
assert!(transformed.get("messages").is_some());
assert!(transformed.get("prompt_cache_key").is_none());
}
} }

View File

@@ -309,6 +309,7 @@ impl StreamCheckService {
/// 根据供应商的 api_format 选择请求格式: /// 根据供应商的 api_format 选择请求格式:
/// - "anthropic" (默认): Anthropic Messages API (/v1/messages) /// - "anthropic" (默认): Anthropic Messages API (/v1/messages)
/// - "openai_chat": OpenAI Chat Completions API (/v1/chat/completions) /// - "openai_chat": OpenAI Chat Completions API (/v1/chat/completions)
/// - "gemini_chat": Gemini Chat 兼容 API (/chat/completions, 不注入 prompt_cache_key)
#[allow(clippy::too_many_arguments)] #[allow(clippy::too_many_arguments)]
async fn check_claude_stream( async fn check_claude_stream(
client: &Client, client: &Client,
@@ -344,6 +345,7 @@ impl StreamCheckService {
.and_then(|meta| meta.is_full_url) .and_then(|meta| meta.is_full_url)
.unwrap_or(false); .unwrap_or(false);
let is_openai_chat = effective_api_format == "openai_chat"; let is_openai_chat = effective_api_format == "openai_chat";
let is_gemini_chat = effective_api_format == "gemini_chat";
let is_openai_responses = effective_api_format == "openai_responses"; let is_openai_responses = effective_api_format == "openai_responses";
let url = let url =
Self::resolve_claude_stream_url(base, auth.strategy, effective_api_format, is_full_url); Self::resolve_claude_stream_url(base, auth.strategy, effective_api_format, is_full_url);
@@ -360,6 +362,9 @@ impl StreamCheckService {
let body = if is_openai_responses { let body = if is_openai_responses {
anthropic_to_responses(anthropic_body, Some(&provider.id)) anthropic_to_responses(anthropic_body, Some(&provider.id))
.map_err(|e| AppError::Message(format!("Failed to build test request: {e}")))? .map_err(|e| AppError::Message(format!("Failed to build test request: {e}")))?
} else if is_gemini_chat {
anthropic_to_openai(anthropic_body, None)
.map_err(|e| AppError::Message(format!("Failed to build test request: {e}")))?
} else if is_openai_chat { } else if is_openai_chat {
anthropic_to_openai(anthropic_body, Some(&provider.id)) anthropic_to_openai(anthropic_body, Some(&provider.id))
.map_err(|e| AppError::Message(format!("Failed to build test request: {e}")))? .map_err(|e| AppError::Message(format!("Failed to build test request: {e}")))?
@@ -395,7 +400,7 @@ impl StreamCheckService {
.header("x-vscode-user-agent-library-version", "electron-fetch") .header("x-vscode-user-agent-library-version", "electron-fetch")
.header("x-request-id", &request_id) .header("x-request-id", &request_id)
.header("x-agent-task-id", &request_id); .header("x-agent-task-id", &request_id);
} else if is_openai_chat || is_openai_responses { } else if is_openai_chat || is_gemini_chat || is_openai_responses {
// OpenAI-compatible targets: Bearer auth + SSE headers only // OpenAI-compatible targets: Bearer auth + SSE headers only
request_builder = request_builder request_builder = request_builder
.header("authorization", format!("Bearer {}", auth.api_key)) .header("authorization", format!("Bearer {}", auth.api_key))
@@ -761,7 +766,7 @@ impl StreamCheckService {
if is_github_copilot && api_format == "openai_responses" { if is_github_copilot && api_format == "openai_responses" {
format!("{base}/v1/responses") format!("{base}/v1/responses")
} else if is_github_copilot { } else if is_github_copilot || api_format == "gemini_chat" {
format!("{base}/chat/completions") format!("{base}/chat/completions")
} else if api_format == "openai_responses" { } else if api_format == "openai_responses" {
if base.ends_with("/v1") { if base.ends_with("/v1") {
@@ -955,6 +960,21 @@ mod tests {
assert_eq!(url, "https://example.com/v1/chat/completions"); assert_eq!(url, "https://example.com/v1/chat/completions");
} }
#[test]
fn test_resolve_claude_stream_url_for_gemini_chat() {
let url = StreamCheckService::resolve_claude_stream_url(
"https://generativelanguage.googleapis.com/v1beta/openai",
AuthStrategy::Bearer,
"gemini_chat",
false,
);
assert_eq!(
url,
"https://generativelanguage.googleapis.com/v1beta/openai/chat/completions"
);
}
#[test] #[test]
fn test_resolve_claude_stream_url_for_openai_responses() { fn test_resolve_claude_stream_url_for_openai_responses() {
let url = StreamCheckService::resolve_claude_stream_url( let url = StreamCheckService::resolve_claude_stream_url(

View File

@@ -416,9 +416,11 @@ export function ClaudeFormFields({
hint={ hint={
apiFormat === "openai_responses" apiFormat === "openai_responses"
? t("providerForm.apiHintResponses") ? t("providerForm.apiHintResponses")
: apiFormat === "openai_chat" : apiFormat === "gemini_chat"
? t("providerForm.apiHintOAI") ? t("providerForm.apiHintGeminiChat")
: t("providerForm.apiHint") : apiFormat === "openai_chat"
? t("providerForm.apiHintOAI")
: t("providerForm.apiHint")
} }
onManageClick={() => onEndpointModalToggle(true)} onManageClick={() => onEndpointModalToggle(true)}
showFullUrlToggle={true} showFullUrlToggle={true}
@@ -488,6 +490,11 @@ export function ClaudeFormFields({
defaultValue: "OpenAI Chat Completions (需转换)", defaultValue: "OpenAI Chat Completions (需转换)",
})} })}
</SelectItem> </SelectItem>
<SelectItem value="gemini_chat">
{t("providerForm.apiFormatGeminiChat", {
defaultValue: "Gemini Chat Compatible (需开启代理)",
})}
</SelectItem>
<SelectItem value="openai_responses"> <SelectItem value="openai_responses">
{t("providerForm.apiFormatOpenAIResponses", { {t("providerForm.apiFormatOpenAIResponses", {
defaultValue: "OpenAI Responses API (需转换)", defaultValue: "OpenAI Responses API (需转换)",

View File

@@ -48,8 +48,9 @@ export interface ProviderPreset {
// Claude API 格式(仅 Claude 供应商使用) // Claude API 格式(仅 Claude 供应商使用)
// - "anthropic" (默认): Anthropic Messages API 格式,直接透传 // - "anthropic" (默认): Anthropic Messages API 格式,直接透传
// - "openai_chat": OpenAI Chat Completions 格式,需要格式转换 // - "openai_chat": OpenAI Chat Completions 格式,需要格式转换
// - "gemini_chat": Gemini Chat 兼容格式,需要格式转换,但不注入 prompt_cache_key
// - "openai_responses": OpenAI Responses API 格式,需要格式转换 // - "openai_responses": OpenAI Responses API 格式,需要格式转换
apiFormat?: "anthropic" | "openai_chat" | "openai_responses"; apiFormat?: "anthropic" | "openai_chat" | "gemini_chat" | "openai_responses";
// 供应商类型标识(用于特殊供应商检测) // 供应商类型标识(用于特殊供应商检测)
// - "github_copilot": GitHub Copilot 供应商(需要 OAuth 认证) // - "github_copilot": GitHub Copilot 供应商(需要 OAuth 认证)

View File

@@ -158,6 +158,13 @@ export function useProviderActions(activeApp: AppId, isProxyRunning?: boolean) {
proxyRequiredReason = t("notifications.proxyReasonOpenAIChat", { proxyRequiredReason = t("notifications.proxyReasonOpenAIChat", {
defaultValue: "使用 OpenAI Chat 接口格式", defaultValue: "使用 OpenAI Chat 接口格式",
}); });
} else if (
provider.meta?.apiFormat === "gemini_chat" &&
activeApp === "claude"
) {
proxyRequiredReason = t("notifications.proxyReasonGeminiChat", {
defaultValue: "使用 Gemini Chat 兼容接口格式",
});
} else if ( } else if (
provider.meta?.apiFormat === "openai_responses" && provider.meta?.apiFormat === "openai_responses" &&
activeApp === "claude" activeApp === "claude"
@@ -207,6 +214,7 @@ export function useProviderActions(activeApp: AppId, isProxyRunning?: boolean) {
provider.category !== "official" && provider.category !== "official" &&
(isCopilotProvider || (isCopilotProvider ||
provider.meta?.apiFormat === "openai_chat" || provider.meta?.apiFormat === "openai_chat" ||
provider.meta?.apiFormat === "gemini_chat" ||
provider.meta?.apiFormat === "openai_responses") provider.meta?.apiFormat === "openai_responses")
) { ) {
// OpenAI format provider: show proxy hint (skip if warning already shown) // OpenAI format provider: show proxy hint (skip if warning already shown)

View File

@@ -177,6 +177,7 @@
"proxyRequiredForSwitch": "This provider {{reason}}, requires the proxy service to work properly. Start the proxy first.", "proxyRequiredForSwitch": "This provider {{reason}}, requires the proxy service to work properly. Start the proxy first.",
"proxyReasonCopilot": "uses GitHub Copilot as a Claude provider", "proxyReasonCopilot": "uses GitHub Copilot as a Claude provider",
"proxyReasonOpenAIChat": "uses OpenAI Chat API format", "proxyReasonOpenAIChat": "uses OpenAI Chat API format",
"proxyReasonGeminiChat": "uses Gemini Chat compatible API format",
"proxyReasonOpenAIResponses": "uses OpenAI Responses API format", "proxyReasonOpenAIResponses": "uses OpenAI Responses API format",
"proxyReasonFullUrl": "has full URL connection mode enabled", "proxyReasonFullUrl": "has full URL connection mode enabled",
"openAIFormatHint": "This provider uses OpenAI-compatible format and requires the proxy service to be enabled", "openAIFormatHint": "This provider uses OpenAI-compatible format and requires the proxy service to be enabled",
@@ -746,6 +747,7 @@
"modelHint": "💡 Leave blank to use provider's default model", "modelHint": "💡 Leave blank to use provider's default model",
"apiHint": "💡 Fill in Claude API compatible service endpoint, avoid trailing slash", "apiHint": "💡 Fill in Claude API compatible service endpoint, avoid trailing slash",
"apiHintOAI": "💡 Fill in OpenAI Chat Completions compatible service endpoint, avoid trailing slash", "apiHintOAI": "💡 Fill in OpenAI Chat Completions compatible service endpoint, avoid trailing slash",
"apiHintGeminiChat": "💡 Fill in a Gemini Chat compatible service endpoint. For Google AI Studio, use the /v1beta/openai root and avoid a trailing slash",
"codexApiHint": "💡 Fill in service endpoint compatible with OpenAI Response format", "codexApiHint": "💡 Fill in service endpoint compatible with OpenAI Response format",
"fillSupplierName": "Please fill in provider name", "fillSupplierName": "Please fill in provider name",
"fillConfigContent": "Please fill in configuration content", "fillConfigContent": "Please fill in configuration content",
@@ -770,6 +772,7 @@
"fullUrlHint": "💡 Enter the full request URL. This mode requires the proxy to be enabled, and the proxy will use the URL as-is without appending a path", "fullUrlHint": "💡 Enter the full request URL. This mode requires the proxy to be enabled, and the proxy will use the URL as-is without appending a path",
"apiFormatAnthropic": "Anthropic Messages (Native)", "apiFormatAnthropic": "Anthropic Messages (Native)",
"apiFormatOpenAIChat": "OpenAI Chat Completions (Requires proxy)", "apiFormatOpenAIChat": "OpenAI Chat Completions (Requires proxy)",
"apiFormatGeminiChat": "Gemini Chat Compatible (Requires proxy)",
"apiFormatOpenAIResponses": "OpenAI Responses API (Requires proxy)", "apiFormatOpenAIResponses": "OpenAI Responses API (Requires proxy)",
"authField": "Auth Field", "authField": "Auth Field",
"authFieldAuthToken": "ANTHROPIC_AUTH_TOKEN (Default)", "authFieldAuthToken": "ANTHROPIC_AUTH_TOKEN (Default)",

View File

@@ -177,6 +177,7 @@
"proxyRequiredForSwitch": "このプロバイダーは{{reason}}、プロキシサービスが必要です。先にプロキシを起動してください", "proxyRequiredForSwitch": "このプロバイダーは{{reason}}、プロキシサービスが必要です。先にプロキシを起動してください",
"proxyReasonCopilot": "GitHub Copilot を Claude プロバイダーとして使用しており", "proxyReasonCopilot": "GitHub Copilot を Claude プロバイダーとして使用しており",
"proxyReasonOpenAIChat": "OpenAI Chat API フォーマットを使用しており", "proxyReasonOpenAIChat": "OpenAI Chat API フォーマットを使用しており",
"proxyReasonGeminiChat": "Gemini Chat 互換 API フォーマットを使用しており",
"proxyReasonOpenAIResponses": "OpenAI Responses API フォーマットを使用しており", "proxyReasonOpenAIResponses": "OpenAI Responses API フォーマットを使用しており",
"proxyReasonFullUrl": "完全 URL 接続モードが有効になっており", "proxyReasonFullUrl": "完全 URL 接続モードが有効になっており",
"openAIFormatHint": "このプロバイダーは OpenAI 互換フォーマットを使用しており、プロキシサービスの有効化が必要です", "openAIFormatHint": "このプロバイダーは OpenAI 互換フォーマットを使用しており、プロキシサービスの有効化が必要です",
@@ -746,6 +747,7 @@
"modelHint": "💡 空欄ならプロバイダーのデフォルトモデルを使用します", "modelHint": "💡 空欄ならプロバイダーのデフォルトモデルを使用します",
"apiHint": "💡 Claude API 互換サービスのエンドポイントを入力してください。末尾にスラッシュを付けないでください", "apiHint": "💡 Claude API 互換サービスのエンドポイントを入力してください。末尾にスラッシュを付けないでください",
"apiHintOAI": "💡 OpenAI Chat Completions 互換サービスのエンドポイントを入力してください。末尾にスラッシュを付けないでください", "apiHintOAI": "💡 OpenAI Chat Completions 互換サービスのエンドポイントを入力してください。末尾にスラッシュを付けないでください",
"apiHintGeminiChat": "💡 Gemini Chat 互換サービスのエンドポイントを入力してください。Google AI Studio の場合は /v1beta/openai ルートを使い、末尾にスラッシュを付けないでください",
"codexApiHint": "💡 OpenAI Response 互換のサービスエンドポイントを入力してください", "codexApiHint": "💡 OpenAI Response 互換のサービスエンドポイントを入力してください",
"fillSupplierName": "プロバイダー名を入力してください", "fillSupplierName": "プロバイダー名を入力してください",
"fillConfigContent": "設定内容を入力してください", "fillConfigContent": "設定内容を入力してください",
@@ -770,6 +772,7 @@
"fullUrlHint": "💡 完全なリクエスト URL を入力してください。このモードはプロキシを有効にして使用する必要があり、プロキシはこの URL をそのまま使用し、パスを追加しません", "fullUrlHint": "💡 完全なリクエスト URL を入力してください。このモードはプロキシを有効にして使用する必要があり、プロキシはこの URL をそのまま使用し、パスを追加しません",
"apiFormatAnthropic": "Anthropic Messagesネイティブ", "apiFormatAnthropic": "Anthropic Messagesネイティブ",
"apiFormatOpenAIChat": "OpenAI Chat Completionsプロキシが必要", "apiFormatOpenAIChat": "OpenAI Chat Completionsプロキシが必要",
"apiFormatGeminiChat": "Gemini Chat Compatibleプロキシが必要",
"apiFormatOpenAIResponses": "OpenAI Responses APIプロキシが必要", "apiFormatOpenAIResponses": "OpenAI Responses APIプロキシが必要",
"authField": "認証フィールド", "authField": "認証フィールド",
"authFieldAuthToken": "ANTHROPIC_AUTH_TOKENデフォルト", "authFieldAuthToken": "ANTHROPIC_AUTH_TOKENデフォルト",

View File

@@ -177,6 +177,7 @@
"proxyRequiredForSwitch": "此供应商{{reason}},需要代理服务才能正常使用,请先启动代理", "proxyRequiredForSwitch": "此供应商{{reason}},需要代理服务才能正常使用,请先启动代理",
"proxyReasonCopilot": "使用 GitHub Copilot 作为 Claude 供应商", "proxyReasonCopilot": "使用 GitHub Copilot 作为 Claude 供应商",
"proxyReasonOpenAIChat": "使用 OpenAI Chat 接口格式", "proxyReasonOpenAIChat": "使用 OpenAI Chat 接口格式",
"proxyReasonGeminiChat": "使用 Gemini Chat 兼容接口格式",
"proxyReasonOpenAIResponses": "使用 OpenAI Responses 接口格式", "proxyReasonOpenAIResponses": "使用 OpenAI Responses 接口格式",
"proxyReasonFullUrl": "开启了完整 URL 连接模式", "proxyReasonFullUrl": "开启了完整 URL 连接模式",
"openAIFormatHint": "此供应商使用 OpenAI 兼容格式,需要开启代理服务才能正常使用", "openAIFormatHint": "此供应商使用 OpenAI 兼容格式,需要开启代理服务才能正常使用",
@@ -746,6 +747,7 @@
"modelHint": "💡 留空将使用供应商的默认模型", "modelHint": "💡 留空将使用供应商的默认模型",
"apiHint": "💡 填写兼容 Claude API 的服务端点地址,不要以斜杠结尾", "apiHint": "💡 填写兼容 Claude API 的服务端点地址,不要以斜杠结尾",
"apiHintOAI": "💡 填写兼容 OpenAI Chat Completions 的服务端点地址,不要以斜杠结尾", "apiHintOAI": "💡 填写兼容 OpenAI Chat Completions 的服务端点地址,不要以斜杠结尾",
"apiHintGeminiChat": "💡 填写兼容 Gemini Chat 的服务端点地址,例如 Google AI Studio 可填写到 /v1beta/openai且不要以斜杠结尾",
"codexApiHint": "💡 填写兼容 OpenAI Response 格式的服务端点地址", "codexApiHint": "💡 填写兼容 OpenAI Response 格式的服务端点地址",
"fillSupplierName": "请填写供应商名称", "fillSupplierName": "请填写供应商名称",
"fillConfigContent": "请填写配置内容", "fillConfigContent": "请填写配置内容",
@@ -770,6 +772,7 @@
"fullUrlHint": "💡 请填写完整请求 URL并且必须开启代理后使用代理将直接使用此 URL不拼接路径", "fullUrlHint": "💡 请填写完整请求 URL并且必须开启代理后使用代理将直接使用此 URL不拼接路径",
"apiFormatAnthropic": "Anthropic Messages (原生)", "apiFormatAnthropic": "Anthropic Messages (原生)",
"apiFormatOpenAIChat": "OpenAI Chat Completions (需开启代理)", "apiFormatOpenAIChat": "OpenAI Chat Completions (需开启代理)",
"apiFormatGeminiChat": "Gemini Chat Compatible (需开启代理)",
"apiFormatOpenAIResponses": "OpenAI Responses API (需开启代理)", "apiFormatOpenAIResponses": "OpenAI Responses API (需开启代理)",
"authField": "认证字段", "authField": "认证字段",
"authFieldAuthToken": "ANTHROPIC_AUTH_TOKEN默认", "authFieldAuthToken": "ANTHROPIC_AUTH_TOKEN默认",

View File

@@ -158,15 +158,16 @@ export interface ProviderMeta {
// Claude API 格式(仅 Claude 供应商使用) // Claude API 格式(仅 Claude 供应商使用)
// - "anthropic": 原生 Anthropic Messages API 格式,直接透传 // - "anthropic": 原生 Anthropic Messages API 格式,直接透传
// - "openai_chat": OpenAI Chat Completions 格式,需要格式转换 // - "openai_chat": OpenAI Chat Completions 格式,需要格式转换
// - "gemini_chat": Gemini Chat 兼容格式,需要格式转换,但不注入 prompt_cache_key
// - "openai_responses": OpenAI Responses API 格式,需要格式转换 // - "openai_responses": OpenAI Responses API 格式,需要格式转换
apiFormat?: "anthropic" | "openai_chat" | "openai_responses"; apiFormat?: "anthropic" | "openai_chat" | "gemini_chat" | "openai_responses";
// 通用认证绑定 // 通用认证绑定
authBinding?: AuthBinding; authBinding?: AuthBinding;
// Claude 认证字段名 // Claude 认证字段名
apiKeyField?: ClaudeApiKeyField; apiKeyField?: ClaudeApiKeyField;
// 是否将 base_url 视为完整 API 端点(代理直接使用此 URL不拼接路径 // 是否将 base_url 视为完整 API 端点(代理直接使用此 URL不拼接路径
isFullUrl?: boolean; isFullUrl?: boolean;
// Prompt cache key for OpenAI-compatible endpoints (improves cache hit rate) // Prompt cache key for compatible endpoints that accept it (not used by gemini_chat)
promptCacheKey?: string; promptCacheKey?: string;
// 供应商类型(用于识别 Copilot 等特殊供应商) // 供应商类型(用于识别 Copilot 等特殊供应商)
providerType?: string; providerType?: string;
@@ -180,8 +181,13 @@ export type SkillSyncMethod = "auto" | "symlink" | "copy";
// Claude API 格式类型 // Claude API 格式类型
// - "anthropic": 原生 Anthropic Messages API 格式,直接透传 // - "anthropic": 原生 Anthropic Messages API 格式,直接透传
// - "openai_chat": OpenAI Chat Completions 格式,需要格式转换 // - "openai_chat": OpenAI Chat Completions 格式,需要格式转换
// - "gemini_chat": Gemini Chat 兼容格式,需要格式转换,但不注入 prompt_cache_key
// - "openai_responses": OpenAI Responses API 格式,需要格式转换 // - "openai_responses": OpenAI Responses API 格式,需要格式转换
export type ClaudeApiFormat = "anthropic" | "openai_chat" | "openai_responses"; export type ClaudeApiFormat =
| "anthropic"
| "openai_chat"
| "gemini_chat"
| "openai_responses";
// Claude 认证字段类型 // Claude 认证字段类型
export type ClaudeApiKeyField = "ANTHROPIC_AUTH_TOKEN" | "ANTHROPIC_API_KEY"; export type ClaudeApiKeyField = "ANTHROPIC_AUTH_TOKEN" | "ANTHROPIC_API_KEY";