@@ -289,17 +289,72 @@ def test_openai_compatible_adapter(basic_task):
     assert isinstance(adapter, LiteLlmAdapter)
     assert adapter.config.additional_body_options == {"api_key": "test-key"}
     assert adapter.config.base_url == "https://test.com/v1"
-    assert adapter.config.run_config_properties.model_name == "test-model"
+    # The full slug is preserved on the run config so it can be persisted and rehydrated.
+    # The bare model id is only stripped at the litellm boundary (see lite_llm_provider_model).
+    assert (
+        adapter.config.run_config_properties.model_name
+        == "some-provider::test-model"
+    )
     assert (
         adapter.config.run_config_properties.model_provider_name
         == "openai_compatible"
     )
+    assert adapter.model_provider().model_id == "test-model"
     assert adapter.config.run_config_properties.prompt_id == "simple_prompt_builder"
     assert (
         adapter.config.run_config_properties.structured_output_mode == "json_schema"
     )
 
 
+def test_openai_compatible_adapter_preserves_model_name_for_rehydration(basic_task):
+    """Regression: building an adapter must not strip the legacy "{provider}::{model_id}"
+    prefix off run_config.model_name. _properties_for_task_output writes that name to
+    disk; if it's stripped, repair (and any other rehydration path) re-reads a name
+    without "::" and crashes splitting it again. See: openai/gpt-oss-safeguard-20b repair bug.
+    """
+    with patch("kiln_ai.adapters.provider_tools.Config.shared") as mock_config_shared:
+        mock_config_shared.return_value.openai_compatible_providers = [
+            {
+                "name": "vllm local",
+                "base_url": "http://localhost:8000/v1",
+                "api_key": "",
+            }
+        ]
+
+        original_run_config = KilnAgentRunConfigProperties(
+            model_name="vllm local::openai/gpt-oss-safeguard-20b",
+            model_provider_name=ModelProviderName.openai_compatible,
+            prompt_id="simple_prompt_builder",
+            structured_output_mode="json_schema",
+        )
+
+        adapter = adapter_for_task(
+            kiln_task=basic_task,
+            run_config_properties=original_run_config,
+        )
+
+        assert isinstance(adapter, LiteLlmAdapter)
+        # The slug round-trips intact on the run config that gets persisted.
+        assert (
+            adapter.config.run_config_properties.model_name
+            == "vllm local::openai/gpt-oss-safeguard-20b"
+        )
+        # Litellm sees the bare model id (no "::").
+        assert adapter.model_provider().model_id == "openai/gpt-oss-safeguard-20b"
+
+        # Simulate the repair flow: rebuild the adapter from the (now-correctly persisted)
+        # run config. This used to raise "Invalid openai compatible model ID: ..." because
+        # the first call had stripped the prefix.
+        rehydrated = adapter_for_task(
+            kiln_task=basic_task,
+            run_config_properties=adapter.config.run_config_properties.model_copy(
+                deep=True
+            ),
+        )
+        assert isinstance(rehydrated, LiteLlmAdapter)
+        assert rehydrated.config.base_url == "http://localhost:8000/v1"
+
+
 def test_custom_openai_compatible_provider(mock_config, basic_task):
     adapter = adapter_for_task(
         kiln_task=basic_task,