2 changes: 1 addition & 1 deletion BitFun-Installer/src/data/modelProviders.ts
@@ -99,7 +99,7 @@ export const PROVIDER_TEMPLATES: Record<string, ProviderTemplate> = {
descriptionKey: 'model.providers.deepseek.description',
baseUrl: 'https://api.deepseek.com/v1',
format: 'openai',
models: ['deepseek-chat', 'deepseek-reasoner'],
models: ['deepseek-v4-flash', 'deepseek-v4-pro'],
helpUrl: 'https://platform.deepseek.com/api_keys',
},
zhipu: {
2 changes: 1 addition & 1 deletion BitFun-Installer/src/i18n/locales/en.json
@@ -51,7 +51,7 @@
"installDone": "Installation complete",
"provider": "Provider",
"config": "Connection",
"modelName": "Model name (e.g. deepseek-chat)",
"modelName": "Model name (e.g. deepseek-v4-flash)",
"apiKey": "API key",
"back": "Back",
"skip": "Skip for now",
2 changes: 1 addition & 1 deletion BitFun-Installer/src/i18n/locales/zh-TW.json
@@ -51,7 +51,7 @@
"installDone": "安裝完成",
"provider": "服務商",
"config": "連接信息",
"modelName": "模型名稱(如 deepseek-chat)",
"modelName": "模型名稱(如 deepseek-v4-flash)",
"apiKey": "API Key",
"back": "返回",
"skip": "稍後配置",
2 changes: 1 addition & 1 deletion BitFun-Installer/src/i18n/locales/zh.json
@@ -51,7 +51,7 @@
"installDone": "安装完成",
"provider": "服务商",
"config": "连接信息",
"modelName": "模型名称(如 deepseek-chat)",
"modelName": "模型名称(如 deepseek-v4-flash)",
"apiKey": "API Key",
"back": "返回",
"skip": "稍后配置",
118 changes: 117 additions & 1 deletion src/crates/ai-adapters/src/client.rs
@@ -422,9 +422,82 @@ mod tests {

assert_eq!(request_body["thinking"]["type"], "enabled");
assert!(request_body.get("enable_thinking").is_none());
assert!(request_body.get("reasoning_effort").is_none());
assert!(request_body.get("reasoning_split").is_none());
}

#[test]
fn build_openai_request_body_adds_deepseek_reasoning_effort() {
let client = AIClient::new(AIConfig {
name: "deepseek".to_string(),
base_url: "https://api.deepseek.com/v1".to_string(),
request_url: "https://api.deepseek.com/v1/chat/completions".to_string(),
api_key: "test-key".to_string(),
model: "deepseek-v4-pro".to_string(),
format: "openai".to_string(),
context_window: 128000,
max_tokens: Some(4096),
temperature: None,
top_p: None,
reasoning_mode: ReasoningMode::Enabled,
inline_think_in_text: false,
custom_headers: None,
custom_headers_mode: None,
skip_ssl_verify: false,
reasoning_effort: Some("xhigh".to_string()),
thinking_budget_tokens: None,
custom_request_body: None,
custom_request_body_mode: None,
});

let request_body = openai::chat::build_request_body(
&client,
&client.config.request_url,
vec![json!({ "role": "user", "content": "hello" })],
None,
None,
);

assert_eq!(request_body["thinking"]["type"], "enabled");
assert_eq!(request_body["reasoning_effort"], "max");
}

#[test]
fn build_openai_request_body_omits_deepseek_reasoning_effort_when_disabled() {
let client = AIClient::new(AIConfig {
name: "deepseek".to_string(),
base_url: "https://api.deepseek.com/v1".to_string(),
request_url: "https://api.deepseek.com/v1/chat/completions".to_string(),
api_key: "test-key".to_string(),
model: "deepseek-v4-flash".to_string(),
format: "openai".to_string(),
context_window: 128000,
max_tokens: Some(4096),
temperature: None,
top_p: None,
reasoning_mode: ReasoningMode::Disabled,
inline_think_in_text: false,
custom_headers: None,
custom_headers_mode: None,
skip_ssl_verify: false,
reasoning_effort: Some("max".to_string()),
thinking_budget_tokens: None,
custom_request_body: None,
custom_request_body_mode: None,
});

let request_body = openai::chat::build_request_body(
&client,
&client.config.request_url,
vec![json!({ "role": "user", "content": "hello" })],
None,
None,
);

assert_eq!(request_body["thinking"]["type"], "disabled");
assert!(request_body.get("reasoning_effort").is_none());
}

#[test]
fn build_openai_request_body_uses_enable_thinking_for_siliconflow() {
let client = AIClient::new(AIConfig {
@@ -536,10 +609,52 @@ mod tests {
assert_eq!(request_body["output_config"]["effort"], "high");
}

#[test]
fn build_anthropic_request_body_adds_deepseek_reasoning_effort() {
let client = AIClient::new(AIConfig {
name: "deepseek".to_string(),
base_url: "https://api.deepseek.com/anthropic".to_string(),
request_url: "https://api.deepseek.com/anthropic/v1/messages".to_string(),
api_key: "test-key".to_string(),
model: "deepseek-v4-pro".to_string(),
format: "anthropic".to_string(),
context_window: 200000,
max_tokens: Some(8192),
temperature: None,
top_p: None,
reasoning_mode: ReasoningMode::Enabled,
inline_think_in_text: false,
custom_headers: None,
custom_headers_mode: None,
skip_ssl_verify: false,
reasoning_effort: Some("xhigh".to_string()),
thinking_budget_tokens: None,
custom_request_body: None,
custom_request_body_mode: None,
});

let request_body = anthropic::request::build_request_body(
&client,
&client.config.request_url,
None,
vec![json!({ "role": "user", "content": [{ "type": "text", "text": "hello" }] })],
None,
None,
);

assert_eq!(request_body["thinking"]["type"], "enabled");
assert_eq!(request_body["output_config"]["effort"], "max");
}

#[test]
fn build_openai_request_body_trim_mode_preserves_essential_fields() {
let mut client = make_trim_test_client("openai");
client.config.base_url = "https://api.deepseek.com/v1".to_string();
client.config.request_url = "https://api.deepseek.com/v1/chat/completions".to_string();
client.config.model = "deepseek-v4-pro".to_string();
client.config.max_tokens = Some(8192);
client.config.reasoning_mode = ReasoningMode::Enabled;
client.config.reasoning_effort = Some("high".to_string());
let messages = vec![json!({ "role": "user", "content": "hello" })];

let request_body = openai::chat::build_request_body(
@@ -557,13 +672,14 @@
})),
);

assert_eq!(request_body["model"], "test-model");
assert_eq!(request_body["model"], "deepseek-v4-pro");
assert_eq!(request_body["messages"], json!(messages));
assert_eq!(request_body["stream"], true);
assert_eq!(request_body["max_tokens"], 8192);
assert_eq!(request_body["temperature"], 0.7);
assert_eq!(request_body["response_format"]["type"], "json_object");
assert!(request_body.get("thinking").is_none());
assert!(request_body.get("reasoning_effort").is_none());
}

#[test]
37 changes: 37 additions & 0 deletions src/crates/ai-adapters/src/client/quirks.rs
@@ -8,6 +8,29 @@ pub(crate) fn is_siliconflow_url(url: &str) -> bool {
url.contains("api.siliconflow.cn")
}

pub(crate) fn is_deepseek_url(url: &str) -> bool {
url.contains("api.deepseek.com")
}

pub(crate) fn is_deepseek_reasoning_effort_model(model_name: &str) -> bool {
matches!(
model_name.trim().to_ascii_lowercase().as_str(),
"deepseek-v4-flash" | "deepseek-v4-pro"
)
}

pub(crate) fn normalize_deepseek_reasoning_effort(effort: &str) -> Option<&'static str> {
match effort.trim().to_ascii_lowercase().as_str() {
"" => None,
"high" => Some("high"),
"max" => Some("max"),
"low" | "medium" => Some("high"),
"xhigh" => Some("max"),
"none" | "minimal" => None,
_ => Some("high"),
}
}

pub(crate) fn parse_glm_major_minor(model_name: &str) -> Option<(u32, u32)> {
let lower = model_name.to_ascii_lowercase();
let tail = lower.strip_prefix("glm-")?;
@@ -40,7 +63,9 @@ pub(crate) fn should_append_tool_stream(url: &str, model_name: &str) -> bool {
pub(crate) fn apply_openai_compatible_reasoning_fields(
request_body: &mut serde_json::Value,
mode: ReasoningMode,
reasoning_effort: Option<&str>,
url: &str,
model_name: &str,
) {
let normalized_mode = if mode == ReasoningMode::Adaptive {
ReasoningMode::Enabled
@@ -66,4 +91,16 @@
}
ReasoningMode::Adaptive => unreachable!("adaptive mode is normalized above"),
}

if normalized_mode == ReasoningMode::Disabled {
return;
}

if !(is_deepseek_url(url) || is_deepseek_reasoning_effort_model(model_name)) {
return;
}

if let Some(effort) = reasoning_effort.and_then(normalize_deepseek_reasoning_effort) {
request_body["reasoning_effort"] = serde_json::json!(effort);
}
}
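
Note (illustrative, not part of this PR's diff): the new quirk helpers collapse the configured effort values onto the two levels DeepSeek accepts. A minimal sketch, assuming crate-internal access since the helpers are pub(crate):

// Mapping sketch for the helpers added above (crate-internal usage).
assert!(is_deepseek_url("https://api.deepseek.com/v1/chat/completions"));
assert!(is_deepseek_reasoning_effort_model(" DeepSeek-V4-Pro "));              // trimmed + case-insensitive
assert_eq!(normalize_deepseek_reasoning_effort("xhigh"), Some("max"));
assert_eq!(normalize_deepseek_reasoning_effort("medium"), Some("high"));       // low/medium round up to "high"
assert_eq!(normalize_deepseek_reasoning_effort("minimal"), None);              // none/minimal/empty emit no field
assert_eq!(normalize_deepseek_reasoning_effort("anything-else"), Some("high"));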
48 changes: 38 additions & 10 deletions src/crates/ai-adapters/src/providers/anthropic/message_converter.rs
@@ -129,18 +129,15 @@ impl AnthropicMessageConverter {
fn convert_assistant_message(msg: Message) -> Option<Value> {
let mut content = Vec::new();

if let Some(thinking) = msg.reasoning_content.as_ref() {
if !thinking.is_empty() {
let mut thinking_block = json!({
"type": "thinking",
"thinking": thinking
});
if msg.reasoning_content.is_some() || msg.thinking_signature.is_some() {
let mut thinking_block = json!({
"type": "thinking",
"thinking": msg.reasoning_content.as_deref().unwrap_or("")
});

thinking_block["signature"] =
json!(msg.thinking_signature.as_deref().unwrap_or(""));
thinking_block["signature"] = json!(msg.thinking_signature.as_deref().unwrap_or(""));

content.push(thinking_block);
}
content.push(thinking_block);
}

if let Some(text) = msg.content {
@@ -230,3 +227,34 @@
})
}
}

#[cfg(test)]
mod tests {
use super::AnthropicMessageConverter;
use crate::types::Message;
use serde_json::json;

#[test]
fn preserves_empty_thinking_block_when_signature_exists() {
let msg = Message {
role: "assistant".to_string(),
content: Some("Answer".to_string()),
reasoning_content: Some(String::new()),
thinking_signature: Some("sig_1".to_string()),
tool_calls: None,
tool_call_id: None,
name: None,
is_error: None,
tool_image_attachments: None,
};

let (_, messages) = AnthropicMessageConverter::convert_messages(vec![msg]);
let content = messages[0]["content"]
.as_array()
.expect("assistant content");

assert_eq!(content[0]["type"], json!("thinking"));
assert_eq!(content[0]["thinking"], json!(""));
assert_eq!(content[0]["signature"], json!("sig_1"));
}
}
30 changes: 28 additions & 2 deletions src/crates/ai-adapters/src/providers/anthropic/request.rs
@@ -1,5 +1,8 @@
use super::AnthropicMessageConverter;
use crate::client::quirks::should_append_tool_stream;
use crate::client::quirks::{
is_deepseek_reasoning_effort_model, is_deepseek_url, normalize_deepseek_reasoning_effort,
should_append_tool_stream,
};
use crate::client::sse::execute_sse_request;
use crate::client::{AIClient, StreamResponse};
use crate::providers::shared;
@@ -54,13 +57,26 @@ fn default_anthropic_budget_tokens(max_tokens: Option<u32>) -> Option<u32> {
fn apply_reasoning_fields(
request_body: &mut serde_json::Value,
mode: ReasoningMode,
url: &str,
model_name: &str,
max_tokens: Option<u32>,
reasoning_effort: Option<&str>,
thinking_budget_tokens: Option<u32>,
) {
let is_deepseek_reasoning_target =
is_deepseek_url(url) || is_deepseek_reasoning_effort_model(model_name);

match mode {
ReasoningMode::Default => {}
ReasoningMode::Default => {
if is_deepseek_reasoning_target {
if let Some(effort) = reasoning_effort.and_then(normalize_deepseek_reasoning_effort)
{
request_body["output_config"] = serde_json::json!({
"effort": effort
});
}
}
}
ReasoningMode::Disabled => {
request_body["thinking"] = serde_json::json!({ "type": "disabled" });
}
@@ -74,6 +90,14 @@
}
}
request_body["thinking"] = thinking;
if is_deepseek_reasoning_target {
if let Some(effort) = reasoning_effort.and_then(normalize_deepseek_reasoning_effort)
{
request_body["output_config"] = serde_json::json!({
"effort": effort
});
}
}
}
ReasoningMode::Adaptive => {
if anthropic_supports_adaptive_reasoning(model_name) {
@@ -92,6 +116,7 @@
apply_reasoning_fields(
request_body,
ReasoningMode::Enabled,
url,
model_name,
max_tokens,
None,
@@ -138,6 +163,7 @@ pub(crate) fn build_request_body(
apply_reasoning_fields(
&mut request_body,
client.config.reasoning_mode,
url,
&model_name,
Some(max_tokens),
client.config.reasoning_effort.as_deref(),
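
Note (illustrative, not part of this PR's diff): on the Anthropic-format path the normalized effort is carried as output_config.effort alongside the thinking block. A shape sketch of the reasoning-related fields, assuming the enabled-mode DeepSeek test configuration above (configured reasoning_effort "xhigh"); unrelated fields are omitted:

// Reasoning-related fields expected on the Anthropic-format body (sketch only).
let reasoning_fields = serde_json::json!({
    "thinking": { "type": "enabled" },        // budget_tokens, if any, is filled in separately
    "output_config": { "effort": "max" }      // "xhigh" normalized to "max"
});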
8 changes: 7 additions & 1 deletion src/crates/ai-adapters/src/providers/openai/common.rs
@@ -36,7 +36,13 @@ pub(crate) fn apply_reasoning_fields(
client: &AIClient,
url: &str,
) {
apply_openai_compatible_reasoning_fields(request_body, client.config.reasoning_mode, url);
apply_openai_compatible_reasoning_fields(
request_body,
client.config.reasoning_mode,
client.config.reasoning_effort.as_deref(),
url,
&client.config.model,
);
}

pub(crate) fn resolve_models_url(client: &AIClient) -> String {
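
Note (illustrative, not part of this PR's diff): on the OpenAI-compatible path the same normalization feeds a top-level reasoning_effort field; it is skipped entirely when reasoning is disabled and is only applied for DeepSeek targets (matched by URL or model name). A shape sketch under the enabled-mode test configuration above (configured reasoning_effort "xhigh"); unrelated fields are omitted:

// Reasoning-related fields expected on the OpenAI-format body (sketch only).
let reasoning_fields = serde_json::json!({
    "thinking": { "type": "enabled" },
    "reasoning_effort": "max"                 // "xhigh" normalized to "max"
});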