fix(codex): fix 404 errors and connection timeout with custom base_url (#760)

* fix(proxy): fix Codex 404 errors with custom base_url prefixes

- handlers.rs:268: Remove hardcoded /v1 prefix in Codex forwarding
- codex.rs:140: Only add /v1 for origin-only base_urls, dedupe /v1/v1
- stream_check.rs:364: Try /responses first, fallback to /v1/responses
- provider.rs:427: Don't force /v1 for custom prefix base_urls

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>

* fix(codex): always add /v1 for custom prefix base_urls

Changed logic to always add /v1 prefix unless base_url already ends with /v1.
This fixes 504 timeout errors with relay services that expect /v1 in the path.

- Most relay services follow OpenAI standard format: /v1/responses
- Users can opt-out by adding /v1 to their base_url configuration
- Updated test case to reflect new behavior

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>

* fix(proxy): allow system proxy on localhost with different ports

- Only bypass system proxy if it points to CC Switch's own port (15721)
- Allow other localhost proxies (e.g., Clash on 7890) to be used
- Add INFO level logging for request URLs to aid debugging

This fixes connection timeout issues when using local proxy tools.

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>

* fix(codex): don't add /v1 for custom prefix base_urls

Reverted logic to not add /v1 for base_urls with custom prefixes.
Many relay services use custom paths without /v1.

- Pure origin (e.g., https://api.openai.com) → adds /v1
- With /v1 (e.g., https://api.openai.com/v1) → no change
- Custom prefix (e.g., https://example.com/openai) → no /v1

This fixes 404 errors with relay services that don't use /v1 in their paths.

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>

* fix(proxy): use dynamic port for system proxy detection

Instead of hardcoding port 15721, now uses the actual configured
listen_port from proxy settings.

- Added set_proxy_port() to update the port when proxy server starts
- Added get_proxy_port() to retrieve current port for detection
- Updated server.rs to call set_proxy_port() on startup
- Updated tests to reflect new behavior

This allows users to change the proxy port in settings without
breaking the system proxy detection logic.

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>

* fix(proxy): change default proxy port from 15721 to 5000

Update the default fallback port in get_proxy_port() from 15721 to 5000
to match the project's standard default port configuration.

Also updated test cases to use port 5000 consistently.

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>

* fix(proxy): revert default port back to 15721

Revert the default fallback port in get_proxy_port() from 5000 back to 15721
to align with the project's updated default port configuration.

Also updated test cases to use port 15721 consistently.

Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>

---------

Co-authored-by: ozbombor <ozbombor@users.noreply.github.com>
Co-authored-by: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
Andrew Leng
2026-01-29 09:10:17 +08:00
committed by GitHub
parent 164635f638
commit 0dd823ae3a
7 changed files with 214 additions and 50 deletions
+59 -4
View File
@@ -441,11 +441,18 @@ impl UniversalProvider {
.and_then(|m| m.reasoning_effort.clone())
.unwrap_or_else(|| "high".to_string());
// 确保 base_url 以 /v1 结尾(Codex 使用 OpenAI 兼容 API)
let codex_base_url = if self.base_url.ends_with("/v1") {
self.base_url.clone()
// Codex/OpenAI 的 base_url 既可能是纯 origin(需要补 /v1),也可能包含自定义前缀(不应强行补版本)
let base_trimmed = self.base_url.trim_end_matches('/');
let origin_only = match base_trimmed.split_once("://") {
Some((_scheme, rest)) => !rest.contains('/'),
None => !base_trimmed.contains('/'),
};
let codex_base_url = if base_trimmed.ends_with("/v1") {
base_trimmed.to_string()
} else if origin_only {
format!("{base_trimmed}/v1")
} else {
format!("{}/v1", self.base_url.trim_end_matches('/'))
base_trimmed.to_string()
};
// 生成 Codex 的 config.toml 内容
@@ -521,6 +528,54 @@ requires_openai_auth = true"#
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn universal_codex_provider_origin_base_url_adds_v1() {
let mut p = UniversalProvider::new(
"id".to_string(),
"Test".to_string(),
"custom".to_string(),
"https://api.openai.com".to_string(),
"sk-test".to_string(),
);
p.apps.codex = true;
let provider = p.to_codex_provider().expect("should build codex provider");
let toml = provider
.settings_config
.get("config")
.and_then(|v| v.as_str())
.expect("config should be a toml string");
assert!(toml.contains("base_url = \"https://api.openai.com/v1\""));
}
#[test]
fn universal_codex_provider_custom_prefix_does_not_force_v1() {
let mut p = UniversalProvider::new(
"id".to_string(),
"Test".to_string(),
"custom".to_string(),
"https://example.com/openai".to_string(),
"sk-test".to_string(),
);
p.apps.codex = true;
let provider = p.to_codex_provider().expect("should build codex provider");
let toml = provider
.settings_config
.get("config")
.and_then(|v| v.as_str())
.expect("config should be a toml string");
assert!(toml.contains("base_url = \"https://example.com/openai\""));
assert!(!toml.contains("https://example.com/openai/v1"));
}
}
// ============================================================================
// OpenCode 供应商配置结构
// ============================================================================
+1 -1
View File
@@ -666,7 +666,7 @@ impl RequestForwarder {
// 输出请求信息日志
let tag = adapter.name();
log::debug!("[{tag}] >>> 请求 URL: {url}");
log::info!("[{tag}] >>> 请求 URL: {url}");
if let Ok(body_str) = serde_json::to_string(&filtered_body) {
log::debug!(
"[{tag}] >>> 请求体内容 ({}字节): {}",
+2 -2
View File
@@ -284,7 +284,7 @@ pub async fn handle_chat_completions(
let result = match forwarder
.forward_with_retry(
&AppType::Codex,
"/v1/chat/completions",
"/chat/completions",
body,
headers,
ctx.get_providers(),
@@ -325,7 +325,7 @@ pub async fn handle_responses(
let result = match forwarder
.forward_with_retry(
&AppType::Codex,
"/v1/responses",
"/responses",
body,
headers,
ctx.get_providers(),
+61 -6
View File
@@ -17,6 +17,33 @@ static GLOBAL_CLIENT: OnceCell<RwLock<Client>> = OnceCell::new();
/// 当前代理 URL(用于日志和状态查询)
static CURRENT_PROXY_URL: OnceCell<RwLock<Option<String>>> = OnceCell::new();
/// CC Switch 代理服务器当前监听的端口
static CC_SWITCH_PROXY_PORT: OnceCell<RwLock<u16>> = OnceCell::new();
/// 设置 CC Switch 代理服务器的监听端口
///
/// 应在代理服务器启动时调用,以便系统代理检测能正确识别自己的端口
pub fn set_proxy_port(port: u16) {
if let Some(lock) = CC_SWITCH_PROXY_PORT.get() {
if let Ok(mut current_port) = lock.write() {
*current_port = port;
log::debug!("[GlobalProxy] Updated CC Switch proxy port to {port}");
}
} else {
let _ = CC_SWITCH_PROXY_PORT.set(RwLock::new(port));
log::debug!("[GlobalProxy] Initialized CC Switch proxy port to {port}");
}
}
/// 获取 CC Switch 代理服务器的监听端口
fn get_proxy_port() -> u16 {
CC_SWITCH_PROXY_PORT
.get()
.and_then(|lock| lock.read().ok())
.map(|port| *port)
.unwrap_or(15721) // 默认端口作为回退
}
/// 初始化全局 HTTP 客户端
///
/// 应在应用启动时调用一次。
@@ -258,9 +285,17 @@ fn proxy_points_to_loopback(value: &str) -> bool {
.unwrap_or(false)
}
// 检查是否指向 CC Switch 自己的代理端口
// 只有指向自己的代理才需要跳过,避免递归
fn is_cc_switch_proxy_port(port: Option<u16>) -> bool {
let cc_switch_port = get_proxy_port();
port == Some(cc_switch_port)
}
if let Ok(parsed) = url::Url::parse(value) {
if let Some(host) = parsed.host_str() {
return host_is_loopback(host);
// 只有当主机是 loopback 且端口是 CC Switch 的端口时才返回 true
return host_is_loopback(host) && is_cc_switch_proxy_port(parsed.port());
}
return false;
}
@@ -268,7 +303,7 @@ fn proxy_points_to_loopback(value: &str) -> bool {
let with_scheme = format!("http://{value}");
if let Ok(parsed) = url::Url::parse(&with_scheme) {
if let Some(host) = parsed.host_str() {
return host_is_loopback(host);
return host_is_loopback(host) && is_cc_switch_proxy_port(parsed.port());
}
}
@@ -448,16 +483,30 @@ mod tests {
#[test]
fn test_proxy_points_to_loopback() {
assert!(proxy_points_to_loopback("http://127.0.0.1:7890"));
assert!(proxy_points_to_loopback("socks5://localhost:1080"));
assert!(proxy_points_to_loopback("127.0.0.1:7890"));
// 设置 CC Switch 代理端口为 15721(默认值)
set_proxy_port(15721);
// 只有指向 CC Switch 自己端口的 loopback 地址才返回 true
assert!(proxy_points_to_loopback("http://127.0.0.1:15721"));
assert!(proxy_points_to_loopback("socks5://localhost:15721"));
assert!(proxy_points_to_loopback("127.0.0.1:15721"));
// 其他 loopback 端口不应该被跳过(允许使用其他本地代理工具)
assert!(!proxy_points_to_loopback("http://127.0.0.1:7890"));
assert!(!proxy_points_to_loopback("socks5://localhost:1080"));
// 非 loopback 地址不应该被跳过
assert!(!proxy_points_to_loopback("http://192.168.1.10:7890"));
assert!(!proxy_points_to_loopback("http://192.168.1.10:15721"));
}
#[test]
fn test_system_proxy_points_to_loopback() {
let _guard = env_lock().lock().unwrap();
// 设置 CC Switch 代理端口
set_proxy_port(15721);
let keys = [
"HTTP_PROXY",
"http_proxy",
@@ -471,9 +520,15 @@ mod tests {
std::env::remove_var(key);
}
std::env::set_var("HTTP_PROXY", "http://127.0.0.1:7890");
// 指向 CC Switch 端口的代理应该被跳过
std::env::set_var("HTTP_PROXY", "http://127.0.0.1:15721");
assert!(system_proxy_points_to_loopback());
// 指向其他端口的本地代理不应该被跳过
std::env::set_var("HTTP_PROXY", "http://127.0.0.1:7890");
assert!(!system_proxy_points_to_loopback());
// 非 loopback 地址不应该被跳过
std::env::set_var("HTTP_PROXY", "http://10.0.0.2:7890");
assert!(!system_proxy_points_to_loopback());
+40 -3
View File
@@ -141,10 +141,33 @@ impl ProviderAdapter for CodexAdapter {
let base_trimmed = base_url.trim_end_matches('/');
let endpoint_trimmed = endpoint.trim_start_matches('/');
let mut url = format!("{base_trimmed}/{endpoint_trimmed}");
// OpenAI/Codex 的 base_url 可能是:
// - 纯 origin: https://api.openai.com (需要自动补 /v1)
// - 已含 /v1: https://api.openai.com/v1 (直接拼接)
// - 自定义前缀: https://xxx/openai (不添加 /v1,直接拼接)
// 去除重复的 /v1/v1
if url.contains("/v1/v1") {
// 检查 base_url 是否已经包含 /v1
let already_has_v1 = base_trimmed.ends_with("/v1");
// 检查是否是纯 origin(没有路径部分)
let origin_only = match base_trimmed.split_once("://") {
Some((_scheme, rest)) => !rest.contains('/'),
None => !base_trimmed.contains('/'),
};
let mut url = if already_has_v1 {
// 已经有 /v1,直接拼接
format!("{base_trimmed}/{endpoint_trimmed}")
} else if origin_only {
// 纯 origin,添加 /v1
format!("{base_trimmed}/v1/{endpoint_trimmed}")
} else {
// 自定义前缀,不添加 /v1,直接拼接
format!("{base_trimmed}/{endpoint_trimmed}")
};
// 去除重复的 /v1/v1(可能由 base_url 与 endpoint 都带版本导致)
while url.contains("/v1/v1") {
url = url.replace("/v1/v1", "/v1");
}
@@ -223,6 +246,20 @@ mod tests {
assert_eq!(url, "https://api.openai.com/v1/responses");
}
#[test]
fn test_build_url_origin_adds_v1() {
let adapter = CodexAdapter::new();
let url = adapter.build_url("https://api.openai.com", "/responses");
assert_eq!(url, "https://api.openai.com/v1/responses");
}
#[test]
fn test_build_url_custom_prefix_no_v1() {
let adapter = CodexAdapter::new();
let url = adapter.build_url("https://example.com/openai", "/responses");
assert_eq!(url, "https://example.com/openai/responses");
}
#[test]
fn test_build_url_dedup_v1() {
let adapter = CodexAdapter::new();
+3
View File
@@ -98,6 +98,9 @@ impl ProxyServer {
log::info!("[{}] 代理服务器启动于 {addr}", log_srv::STARTED);
// 更新全局代理端口,用于系统代理检测
crate::proxy::http_client::set_proxy_port(self.config.listen_port);
// 保存关闭句柄
*self.shutdown_tx.write().await = Some(shutdown_tx);
+48 -34
View File
@@ -373,11 +373,15 @@ impl StreamCheckService {
timeout: std::time::Duration,
) -> Result<(u16, String), AppError> {
let base = base_url.trim_end_matches('/');
// Codex CLI 使用 /v1/responses 端点 (OpenAI Responses API)
let url = if base.ends_with("/v1") {
format!("{base}/responses")
// Codex CLI 的 base_url 语义:base_url 是 API base(可能已包含 /v1 或其他自定义前缀),
// Responses 端点为 `/responses`。
//
// 兼容:如果 base_url 配成纯 origin(如 https://api.openai.com),则需要补 `/v1`。
// 优先尝试 `{base}/responses`,若 404 再回退 `{base}/v1/responses`。
let urls = if base.ends_with("/v1") {
vec![format!("{base}/responses")]
} else {
format!("{base}/v1/responses")
vec![format!("{base}/responses"), format!("{base}/v1/responses")]
};
// 解析模型名和推理等级 (支持 model@level 或 model#level 格式)
@@ -399,40 +403,50 @@ impl StreamCheckService {
body["reasoning"] = json!({ "effort": effort });
}
// 严格按照 Codex CLI 请求格式设置 headers
let response = client
.post(&url)
.header("authorization", format!("Bearer {}", auth.api_key))
.header("content-type", "application/json")
.header("accept", "text/event-stream")
.header("accept-encoding", "identity")
.header(
"user-agent",
format!("codex_cli_rs/0.80.0 ({os_name} 15.7.2; {arch_name}) Terminal"),
)
.header("originator", "codex_cli_rs")
.timeout(timeout)
.json(&body)
.send()
.await
.map_err(Self::map_request_error)?;
for (i, url) in urls.iter().enumerate() {
// 严格按照 Codex CLI 请求格式设置 headers
let response = client
.post(url)
.header("authorization", format!("Bearer {}", auth.api_key))
.header("content-type", "application/json")
.header("accept", "text/event-stream")
.header("accept-encoding", "identity")
.header(
"user-agent",
format!("codex_cli_rs/0.80.0 ({os_name} 15.7.2; {arch_name}) Terminal"),
)
.header("originator", "codex_cli_rs")
.timeout(timeout)
.json(&body)
.send()
.await
.map_err(Self::map_request_error)?;
let status = response.status().as_u16();
let status = response.status().as_u16();
if !response.status().is_success() {
let error_text = response.text().await.unwrap_or_default();
return Err(AppError::Message(format!("HTTP {status}: {error_text}")));
}
let mut stream = response.bytes_stream();
if let Some(chunk) = stream.next().await {
match chunk {
Ok(_) => Ok((status, model.to_string())),
Err(e) => Err(AppError::Message(format!("Stream read failed: {e}"))),
if !response.status().is_success() {
let error_text = response.text().await.unwrap_or_default();
// 回退策略:仅当首选 URL 返回 404 时尝试下一个
if i == 0 && status == 404 && urls.len() > 1 {
continue;
}
return Err(AppError::Message(format!("HTTP {status}: {error_text}")));
}
} else {
Err(AppError::Message("No response data received".to_string()))
let mut stream = response.bytes_stream();
if let Some(chunk) = stream.next().await {
match chunk {
Ok(_) => return Ok((status, actual_model)),
Err(e) => return Err(AppError::Message(format!("Stream read failed: {e}"))),
}
}
return Err(AppError::Message("No response data received".to_string()));
}
Err(AppError::Message(
"No valid Codex responses endpoint found".to_string(),
))
}
/// Gemini 流式检查