
Commit 651583f

fix: repair for run from custom provider

Parent: 42f5a0e
4 files changed: 75 additions & 13 deletions

libs/core/kiln_ai/adapters/adapter_registry.py

Lines changed: 5 additions & 8 deletions
@@ -90,16 +90,13 @@ def litellm_core_provider_config(
         run_config_properties.model_provider_name == ModelProviderName.openai_compatible
         and not run_config_properties.model_name.startswith("user_model::")
     ):
+        # Legacy openai_compatible model ids are "provider_name::model_id". We pull the
+        # provider name out for base_url lookup, but keep the full slug on the run config
+        # so it is faithfully persisted to disk and can be rehydrated later (e.g. on repair).
         model_id = run_config_properties.model_name
-        try:
-            openai_compatible_provider_name, model_id = model_id.split("::")
-        except Exception:
+        if "::" not in model_id:
             raise ValueError(f"Invalid openai compatible model ID: {model_id}")
-
-        # Update a copy of the run config properties to use the openai compatible provider
-        updated_run_config_properties = run_config_properties.model_copy(deep=True)
-        updated_run_config_properties.model_name = model_id
-        run_config_properties = updated_run_config_properties
+        openai_compatible_provider_name = model_id.split("::", 1)[0]
 
     config = lite_llm_core_config_for_provider(
         core_provider_name, openai_compatible_provider_name
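
As context for the hunk above, a minimal standalone sketch of the parsing change, using plain strings rather than Kiln's real run-config objects (the helper names here are illustrative, not part of the codebase). The old two-value unpack succeeded on the first pass but, because that pass overwrote model_name with the bare model id before it was persisted, crashed on rehydration:

def provider_from_slug_old(model_name: str) -> tuple[str, str]:
    # Old behavior: unpack both halves; the caller then overwrote
    # run_config.model_name with the bare model id before persisting it.
    try:
        provider_name, bare_id = model_name.split("::")
    except Exception:
        raise ValueError(f"Invalid openai compatible model ID: {model_name}")
    return provider_name, bare_id


def provider_from_slug_new(model_name: str) -> str:
    # New behavior: extract only the provider name (for base_url lookup);
    # model_name itself stays intact on the run config.
    if "::" not in model_name:
        raise ValueError(f"Invalid openai compatible model ID: {model_name}")
    return model_name.split("::", 1)[0]


slug = "vllm local::openai/gpt-oss-safeguard-20b"
_, bare = provider_from_slug_old(slug)  # first pass: fine
# The old flow persisted `bare`; repair then re-parsed it and crashed:
# provider_from_slug_old(bare) -> ValueError (no "::" left to unpack)
assert provider_from_slug_new(slug) == "vllm local"  # slug stays on the run config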

libs/core/kiln_ai/adapters/provider_tools.py

Lines changed: 5 additions & 0 deletions
@@ -245,6 +245,11 @@ def kiln_model_provider_from(
 def lite_llm_provider_model(
     model_id: str,
 ) -> KilnModelProvider:
+    # Legacy openai_compatible model ids are "{provider_name}::{model_id}".
+    # litellm only needs the bare model id (it builds "openai/<model_id>"),
+    # so strip the provider prefix here. The full slug stays on run_config.model_name.
+    if "::" in model_id:
+        model_id = model_id.split("::", 1)[1]
     return KilnModelProvider(
         name=ModelProviderName.openai_compatible,
         model_id=model_id,
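
The new comment notes that litellm builds "openai/<model_id>" from the bare id. As a rough sketch of that boundary (assuming litellm's generic completion API, with values taken from the tests below; this is not Kiln's adapter code):

import litellm

slug = "test_provider::gpt-4"           # stored legacy slug
bare_model_id = slug.split("::", 1)[1]  # -> "gpt-4"

# litellm routes "openai/<model_id>" through its OpenAI-compatible client, so
# the legacy "provider::" prefix must not leak into the model string.
response = litellm.completion(
    model=f"openai/{bare_model_id}",  # "openai/gpt-4"
    api_base="https://api.test.com",  # base_url looked up from the provider name
    api_key="test-key",
    messages=[{"role": "user", "content": "hello"}],
)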

libs/core/kiln_ai/adapters/test_adapter_registry.py

Lines changed: 56 additions & 1 deletion
@@ -289,17 +289,72 @@ def test_openai_compatible_adapter(basic_task):
     assert isinstance(adapter, LiteLlmAdapter)
     assert adapter.config.additional_body_options == {"api_key": "test-key"}
     assert adapter.config.base_url == "https://test.com/v1"
-    assert adapter.config.run_config_properties.model_name == "test-model"
+    # The full slug is preserved on the run config so it can be persisted and rehydrated.
+    # The bare model id is only stripped at the litellm boundary (see lite_llm_provider_model).
+    assert (
+        adapter.config.run_config_properties.model_name
+        == "some-provider::test-model"
+    )
     assert (
         adapter.config.run_config_properties.model_provider_name
         == "openai_compatible"
     )
+    assert adapter.model_provider().model_id == "test-model"
     assert adapter.config.run_config_properties.prompt_id == "simple_prompt_builder"
     assert (
         adapter.config.run_config_properties.structured_output_mode == "json_schema"
     )
 
 
+def test_openai_compatible_adapter_preserves_model_name_for_rehydration(basic_task):
+    """Regression: building an adapter must not strip the legacy "{provider}::{model_id}"
+    prefix off run_config.model_name. _properties_for_task_output writes that name to
+    disk; if it's stripped, repair (and any other rehydration path) re-reads a name
+    without "::" and crashes splitting it again. See: openai/gpt-oss-safeguard-20b repair bug.
+    """
+    with patch("kiln_ai.adapters.provider_tools.Config.shared") as mock_config_shared:
+        mock_config_shared.return_value.openai_compatible_providers = [
+            {
+                "name": "vllm local",
+                "base_url": "http://localhost:8000/v1",
+                "api_key": "",
+            }
+        ]
+
+        original_run_config = KilnAgentRunConfigProperties(
+            model_name="vllm local::openai/gpt-oss-safeguard-20b",
+            model_provider_name=ModelProviderName.openai_compatible,
+            prompt_id="simple_prompt_builder",
+            structured_output_mode="json_schema",
+        )
+
+        adapter = adapter_for_task(
+            kiln_task=basic_task,
+            run_config_properties=original_run_config,
+        )
+
+        assert isinstance(adapter, LiteLlmAdapter)
+        # The slug round-trips intact on the run config that gets persisted.
+        assert (
+            adapter.config.run_config_properties.model_name
+            == "vllm local::openai/gpt-oss-safeguard-20b"
+        )
+        # Litellm sees the bare model id (no "::").
+        assert adapter.model_provider().model_id == "openai/gpt-oss-safeguard-20b"
+
+        # Simulate the repair flow: rebuild the adapter from the (now-correctly persisted)
+        # run config. This used to raise "Invalid openai compatible model ID: ..." because
+        # the first call had stripped the prefix.
+        rehydrated = adapter_for_task(
+            kiln_task=basic_task,
+            run_config_properties=adapter.config.run_config_properties.model_copy(
+                deep=True
+            ),
+        )
+        assert isinstance(rehydrated, LiteLlmAdapter)
+        assert rehydrated.config.base_url == "http://localhost:8000/v1"
+
+
 def test_custom_openai_compatible_provider(mock_config, basic_task):
     adapter = adapter_for_task(
         kiln_task=basic_task,
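
The regression test above exercises the repair flow in-process. The invariant the fix restores can also be pictured with plain JSON persistence (a minimal sketch with hypothetical helpers, not Kiln's storage code): building an adapter must not mutate the properties that get written to disk, so a second build from the persisted copy succeeds.

import json


def build_adapter_props(props: dict) -> dict:
    # Stand-in for adapter_for_task: validates the slug but no longer rewrites it.
    model_name = props["model_name"]
    if "::" not in model_name:
        raise ValueError(f"Invalid openai compatible model ID: {model_name}")
    return props  # properties pass through unchanged


props = {
    "model_name": "vllm local::openai/gpt-oss-safeguard-20b",
    "model_provider_name": "openai_compatible",
}

# First run: build, then persist exactly what the adapter carried.
persisted = json.dumps(build_adapter_props(props))

# Repair: rehydrate from disk and build again. With the old mutating code the
# persisted name had lost its "::", and this second build raised ValueError.
rehydrated = build_adapter_props(json.loads(persisted))
assert rehydrated["model_name"] == props["model_name"]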

libs/core/kiln_ai/adapters/test_provider_tools.py

Lines changed: 9 additions & 4 deletions
@@ -662,19 +662,24 @@ def test_openai_compatible_provider_config(mock_shared_config):
         config.run_config_properties.model_provider_name
         == ModelProviderName.openai_compatible
     )
-    assert config.run_config_properties.model_name == "gpt-4"
+    # Full slug is preserved on the run config; the bare model id is only used at the litellm boundary.
+    assert config.run_config_properties.model_name == "test_provider::gpt-4"
     assert config.additional_body_options == {"api_key": "test-key"}
     assert config.base_url == "https://api.test.com"
 
 
 def test_litellm_provider_model_success(mock_shared_config):
-    """Test successful creation of an OpenAI compatible provider"""
+    """Test successful creation of an OpenAI compatible provider.
+
+    The provider's model_id is the bare model id (without the legacy "provider::" prefix),
+    since that's what litellm needs to call the upstream API.
+    """
     model_id = "test_provider::gpt-4"
 
     provider = lite_llm_provider_model(model_id)
 
     assert provider.name == ModelProviderName.openai_compatible
-    assert provider.model_id == model_id
+    assert provider.model_id == "gpt-4"
     assert provider.supports_structured_output is False
     assert provider.supports_data_gen is False
     assert provider.untested_model is True
@@ -697,7 +702,7 @@ def test_lite_llm_config_no_api_key(mock_shared_config):
         config.run_config_properties.model_provider_name
         == ModelProviderName.openai_compatible
     )
-    assert config.run_config_properties.model_name == "gpt-4"
+    assert config.run_config_properties.model_name == "no_key_provider::gpt-4"
     assert config.additional_body_options == {"api_key": "NA"}
     assert config.base_url == "https://api.nokey.com"
 