Skip to content

Commit cd1feb5

Browse files
authored
🐛 bugfix: model status sync, SiliconFlow logic alignment, and VLM connectivity
🐛 bugfix: model status sync, SiliconFlow logic alignment, and VLM connectivity
2 parents f3a6a6e + 186443d commit cd1feb5

File tree

7 files changed

+308
-33
lines changed

7 files changed

+308
-33
lines changed

backend/services/model_management_service.py

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -199,7 +199,11 @@ async def list_provider_models_for_tenant(tenant_id: str, provider: str, model_t
199199
model_list = get_models_by_tenant_factory_type(
200200
tenant_id, provider, model_type)
201201
for model in model_list:
202-
model["id"] = model["model_repo"] + "/" + model["model_name"]
202+
# Use add_repo_to_name for consistent format with /model/list API
203+
model["id"] = add_repo_to_name(
204+
model_repo=model["model_repo"],
205+
model_name=model["model_name"],
206+
)
203207

204208
logging.debug(f"Provider model {provider} created successfully")
205209
return model_list

frontend/app/[locale]/models/components/model/ModelAddDialog.tsx

Lines changed: 41 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -1300,19 +1300,47 @@ export const ModelAddDialog = ({
13001300
</a>
13011301
</Tooltip>
13021302
{form.isBatchImport && (
1303-
<Tooltip title="SiliconFlow">
1304-
<a
1305-
href={PROVIDER_LINKS.siliconflow}
1306-
target="_blank"
1307-
rel="noopener noreferrer"
1308-
>
1309-
<img
1310-
src="/siliconflow.png"
1311-
alt="SiliconFlow"
1312-
className="h-4 ml-1.5 cursor-pointer"
1313-
/>
1314-
</a>
1315-
</Tooltip>
1303+
<>
1304+
<Tooltip title="SiliconFlow">
1305+
<a
1306+
href={PROVIDER_LINKS.siliconflow}
1307+
target="_blank"
1308+
rel="noopener noreferrer"
1309+
>
1310+
<img
1311+
src="/siliconflow.png"
1312+
alt="SiliconFlow"
1313+
className="h-4 ml-1.5 cursor-pointer"
1314+
/>
1315+
</a>
1316+
</Tooltip>
1317+
<Tooltip title={t("model.provider.dashscope")}>
1318+
<a
1319+
href={PROVIDER_LINKS.dashscope}
1320+
target="_blank"
1321+
rel="noopener noreferrer"
1322+
>
1323+
<img
1324+
src="/aliyuncs.png"
1325+
alt="DashScope"
1326+
className="h-4 ml-1.5 cursor-pointer"
1327+
/>
1328+
</a>
1329+
</Tooltip>
1330+
<Tooltip title={t("model.provider.tokenpony")}>
1331+
<a
1332+
href={PROVIDER_LINKS.tokenpony}
1333+
target="_blank"
1334+
rel="noopener noreferrer"
1335+
>
1336+
<img
1337+
src="/tokenpony.png"
1338+
alt="TokenPony"
1339+
className="h-4 ml-1.5 cursor-pointer"
1340+
/>
1341+
</a>
1342+
</Tooltip>
1343+
</>
13161344
)}
13171345
{form.type === "llm" && !form.isBatchImport && (
13181346
<>

frontend/app/[locale]/models/components/model/ModelDeleteDialog.tsx

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1252,7 +1252,9 @@ export const ModelDeleteDialog = ({
12521252
</div>
12531253

12541254
{(selectedSource === MODEL_SOURCES.SILICON ||
1255-
selectedSource === MODEL_SOURCES.MODELENGINE) &&
1255+
selectedSource === MODEL_SOURCES.MODELENGINE ||
1256+
selectedSource === MODEL_SOURCES.DASHSCOPE ||
1257+
selectedSource === MODEL_SOURCES.TOKENPONY) &&
12561258
providerModels.length > 0 ? (
12571259
<div className="max-h-60 overflow-y-auto border border-gray-200 rounded-md divide-y divide-gray-200">
12581260
{providerModels.length > 0 && (

frontend/const/modelConfig.ts

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -84,7 +84,9 @@ export const PROVIDER_LINKS: Record<string, string> = {
8484
deepseek: "https://platform.deepseek.com/",
8585
qwen: "https://bailian.console.aliyun.com/",
8686
jina: "https://jina.ai/",
87-
baai: "https://www.baai.ac.cn/"
87+
baai: "https://www.baai.ac.cn/",
88+
dashscope: "https://dashscope.aliyun.com/",
89+
tokenpony: "https://www.tokenpony.cn/"
8890
};
8991

9092
// User role constants

sdk/nexent/assets/git-flow.png

162 KB
Loading

sdk/nexent/core/models/openai_vlm.py

Lines changed: 42 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,6 @@
1+
import asyncio
12
import base64
3+
import logging
24
import os
35
from typing import List, Dict, Any, Union, BinaryIO
46

@@ -7,6 +9,8 @@
79
from ..models import OpenAIModel
810
from ..utils.observer import MessageObserver
911

12+
logger = logging.getLogger(__name__)
13+
1014

1115
class OpenAIVLModel(OpenAIModel):
1216
def __init__(
@@ -32,17 +36,48 @@ def __init__(
3236

3337
async def check_connectivity(self) -> bool:
3438
"""
35-
Check the connectivity of the VLM model.
39+
Check the connectivity of the VLM model by sending a test request with
40+
a text prompt and an image. VLM APIs (especially DashScope qwen-vl)
41+
require specific format: content as a list with 'type': 'image' and
42+
'type': 'text' objects.
3643
3744
Returns:
38-
bool: Returns True if the model can respond normally, otherwise returns False.
45+
bool: True if the model responds successfully, otherwise False.
3946
"""
47+
# Use local test image from images folder - use absolute path based on module location
48+
module_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
49+
test_image_path = os.path.join(module_dir, "assets", "git-flow.png")
50+
if os.path.exists(test_image_path):
51+
base64_image = self.encode_image(test_image_path)
52+
# Detect image format for proper MIME type
53+
_, ext = os.path.splitext(test_image_path)
54+
image_format = ext.lower()[1:] if ext else "png"
55+
if image_format == "jpg":
56+
image_format = "jpeg"
57+
58+
content_parts: List[Dict[str, Any]] = [
59+
{"type": "image_url", "image_url": {"url": f"data:image/{image_format};base64,{base64_image}"}},
60+
{"type": "text", "text": "Hello"},
61+
]
62+
else:
63+
# Fallback to remote URL if local image not found
64+
test_image_url = "https://help-static-aliyun-doc.aliyuncs.com/file-manage-files/zh-CN/20250925/thtclx/input1.png"
65+
content_parts = [
66+
{"type": "image_url", "image_url": {"url": test_image_url}},
67+
{"type": "text", "text": "Hello"},
68+
]
69+
4070
try:
41-
# Directly reuse the parent class's check_connectivity method
42-
return await super().check_connectivity()
71+
await asyncio.to_thread(
72+
self.client.chat.completions.create,
73+
model=self.model_id,
74+
messages=[{"role": "user", "content": content_parts}],
75+
max_tokens=5,
76+
stream=False,
77+
)
78+
return True
4379
except Exception as e:
44-
import logging
45-
logging.error(f"VLM connectivity check failed: {str(e)}")
80+
logger.error("VLM connectivity check failed: %s", e)
4681
return False
4782

4883
def encode_image(self, image_input: Union[str, BinaryIO]) -> str:
@@ -87,7 +122,7 @@ def prepare_image_message(self, image_input: Union[str, BinaryIO], system_prompt
87122

88123
messages = [{"role": "system", "content": [{"text": system_prompt, "type": "text"}]}, {"role": "user",
89124
"content": [{"type": "image_url",
90-
"image_url": {"url": f"data:image/jpeg;base64,{base64_image}", "detail": "auto"}}]}]
125+
"image_url": {"url": f"data:image/{image_format};base64,{base64_image}", "detail": "auto"}}]}]
91126

92127
return messages
93128

0 commit comments

Comments (0)