Skip to content

Commit f2de5ba

Browse files
committed
LCORE-716: refactor duplicate integration/endpoints tests to parameterized tests
Refactor integration tests to use a parameterized/data-driven test pattern, eliminating duplicate test code and improving maintainability. Converted 19 duplicate test functions into 6 parameterized test functions with 21 test cases. Signed-off-by: Anik Bhattacharjee <anbhatta@redhat.com>
1 parent b250fca commit f2de5ba

File tree

5 files changed

+555
-618
lines changed

5 files changed

+555
-618
lines changed

tests/integration/README.md

Lines changed: 101 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -252,6 +252,107 @@ uv run make test-integration
252252
uv run pytest tests/integration/ -v --tb=short
253253
```
254254

255+
## Data-Driven (Parameterized) Tests
256+
257+
### Overview
258+
259+
Data-driven tests use `@pytest.mark.parametrize` to run the same test logic with different inputs. This eliminates duplicate code and makes test coverage more visible.
260+
261+
**Benefits:**
262+
- Reduce code duplication (~80% reduction for repetitive tests)
263+
- Add new test cases by simply adding to the data table
264+
- See all test scenarios at a glance
265+
- Consistent structure across similar tests
266+
267+
### When to Use
268+
269+
Use parameterized tests when you have:
270+
- **Multiple similar tests** that differ only in input data and expected output
271+
- **Validation tests** with multiple valid/invalid scenarios
272+
- **Error handling tests** with different error conditions
273+
274+
### Pattern
275+
276+
```python
277+
# Define test cases as a list
278+
TEST_CASES = [
279+
pytest.param(
280+
{
281+
"input": "value1",
282+
"expected_result": "result1",
283+
},
284+
id="descriptive_test_name_1",
285+
),
286+
pytest.param(
287+
{
288+
"input": "value2",
289+
"expected_result": "result2",
290+
},
291+
id="descriptive_test_name_2",
292+
),
293+
]
294+
295+
@pytest.mark.asyncio
296+
@pytest.mark.parametrize("test_case", TEST_CASES)
297+
async def test_example_data_driven(
298+
test_case: dict,
299+
# ... fixtures
300+
) -> None:
301+
"""Data-driven test for example functionality.
302+
303+
Tests multiple scenarios:
304+
- Scenario 1 description
305+
- Scenario 2 description
306+
307+
Parameters:
308+
test_case: Dictionary containing test parameters
309+
# ... other fixtures
310+
"""
311+
input_value = test_case["input"]
312+
expected = test_case["expected_result"]
313+
314+
result = await function_under_test(input_value)
315+
316+
assert result == expected
317+
```
318+
319+
### Best Practices for Parameterized Tests
320+
321+
1. **Use descriptive `id` values** - They appear in test output
322+
```python
323+
pytest.param(..., id="attachment_unknown_type_returns_422") # Good
324+
pytest.param(..., id="test1") # Bad
325+
```
326+
327+
2. **Group related test data** - Keep test cases together at module level
328+
```python
329+
ATTACHMENT_TEST_CASES = [...] # Define near the test that uses it
330+
```
331+
332+
3. **Document all scenarios** - List scenarios in docstring
333+
```python
334+
"""Data-driven test for attachments.
335+
336+
Tests:
337+
- Single attachment
338+
- Empty payload
339+
- Invalid type (422 error)
340+
"""
341+
```
342+
343+
4. **Keep test logic simple** - Use if/else only for success vs. error paths
344+
```python
345+
if expected_status == 200:
346+
# Success assertions
347+
else:
348+
# Error assertions
349+
```
350+
351+
5. **Use consistent dict keys** - Standardize parameter names
352+
```python
353+
{"expected_status": 200, "expected_error": None} # Consistent
354+
```
355+
255356
## Best Practices
256357

257358
### 1. Use Common Fixtures

tests/integration/endpoints/test_model_list.py

Lines changed: 67 additions & 104 deletions
Original file line numberDiff line numberDiff line change
@@ -86,55 +86,75 @@ def mock_llama_stack_client_failing_fixture(
8686
yield mock_client
8787

8888

89-
@pytest.mark.asyncio
90-
async def test_models_list(
91-
test_config: AppConfig,
92-
mock_llama_stack_client: AsyncMockType,
93-
test_request: Request,
94-
test_auth: AuthTuple,
95-
) -> None:
96-
"""Test that models endpoint returns successful response.
97-
98-
This integration test verifies:
99-
- Model list handler
100-
101-
Parameters:
102-
test_config: Test configuration
103-
mock_llama_stack_client: Mocked Llama Stack client
104-
test_request: FastAPI request
105-
test_auth: noop authentication tuple
106-
"""
107-
_ = test_config
108-
_ = mock_llama_stack_client
109-
110-
response = await models_endpoint_handler(
111-
request=test_request,
112-
auth=test_auth,
113-
model_type=ModelFilter(model_type=None),
114-
)
115-
116-
# Verify response structure
117-
assert response is not None
118-
assert len(response.models) == 2
119-
assert response.models[0]["identifier"] == "test-provider/test-model-1"
120-
assert response.models[0]["api_model_type"] == "llm"
121-
assert response.models[1]["identifier"] == "test-provider/test-model-2"
122-
assert response.models[1]["api_model_type"] == "embedding"
89+
MODEL_FILTER_TEST_CASES = [
90+
pytest.param(
91+
{
92+
"filter_type": None,
93+
"expected_count": 2,
94+
"expected_models": [
95+
{"identifier": "test-provider/test-model-1", "api_model_type": "llm"},
96+
{
97+
"identifier": "test-provider/test-model-2",
98+
"api_model_type": "embedding",
99+
},
100+
],
101+
},
102+
id="no_filter_returns_all_models",
103+
),
104+
pytest.param(
105+
{
106+
"filter_type": "llm",
107+
"expected_count": 1,
108+
"expected_models": [
109+
{"identifier": "test-provider/test-model-1", "api_model_type": "llm"},
110+
],
111+
},
112+
id="filter_llm_returns_llm_model",
113+
),
114+
pytest.param(
115+
{
116+
"filter_type": "embedding",
117+
"expected_count": 1,
118+
"expected_models": [
119+
{
120+
"identifier": "test-provider/test-model-2",
121+
"api_model_type": "embedding",
122+
},
123+
],
124+
},
125+
id="filter_embedding_returns_embedding_model",
126+
),
127+
pytest.param(
128+
{
129+
"filter_type": "foobar",
130+
"expected_count": 0,
131+
"expected_models": [],
132+
},
133+
id="filter_unknown_type_returns_empty",
134+
),
135+
]
123136

124137

125138
@pytest.mark.asyncio
126-
async def test_models_list_filter_model_type_llm(
139+
@pytest.mark.parametrize("test_case", MODEL_FILTER_TEST_CASES)
140+
async def test_models_list_with_filter(
141+
test_case: dict,
127142
test_config: AppConfig,
128143
mock_llama_stack_client: AsyncMockType,
129144
test_request: Request,
130145
test_auth: AuthTuple,
131146
) -> None:
132-
"""Test that models endpoint returns successful response.
147+
"""Tests for models endpoint filtering.
133148
134-
This integration test verifies:
135-
- Model list handler
149+
Tests different model_type filter scenarios:
150+
- No filter (returns all models)
151+
- Filter by llm type
152+
- Filter by embedding type
153+
- Filter by unknown type (returns empty)
136154
137155
Parameters:
156+
test_case: Dictionary containing test parameters (filter_type,
157+
expected_count, expected_models)
138158
test_config: Test configuration
139159
mock_llama_stack_client: Mocked Llama Stack client
140160
test_request: FastAPI request
@@ -143,81 +163,24 @@ async def test_models_list_filter_model_type_llm(
143163
_ = test_config
144164
_ = mock_llama_stack_client
145165

146-
response = await models_endpoint_handler(
147-
request=test_request, auth=test_auth, model_type=ModelFilter(model_type="llm")
148-
)
149-
150-
# Verify response structure
151-
assert response is not None
152-
assert len(response.models) == 1
153-
assert response.models[0]["identifier"] == "test-provider/test-model-1"
154-
assert response.models[0]["api_model_type"] == "llm"
155-
156-
157-
@pytest.mark.asyncio
158-
async def test_models_list_filter_model_type_embedding(
159-
test_config: AppConfig,
160-
mock_llama_stack_client: AsyncMockType,
161-
test_request: Request,
162-
test_auth: AuthTuple,
163-
) -> None:
164-
"""Test that models endpoint returns successful response.
165-
166-
This integration test verifies:
167-
- Model list handler
168-
169-
Parameters:
170-
test_config: Test configuration
171-
mock_llama_stack_client: Mocked Llama Stack client
172-
test_request: FastAPI request
173-
test_auth: noop authentication tuple
174-
"""
175-
_ = test_config
176-
_ = mock_llama_stack_client
166+
filter_type = test_case["filter_type"]
167+
expected_count = test_case["expected_count"]
168+
expected_models = test_case["expected_models"]
177169

178170
response = await models_endpoint_handler(
179171
request=test_request,
180172
auth=test_auth,
181-
model_type=ModelFilter(model_type="embedding"),
173+
model_type=ModelFilter(model_type=filter_type),
182174
)
183175

184176
# Verify response structure
185177
assert response is not None
186-
assert len(response.models) == 1
187-
assert response.models[0]["identifier"] == "test-provider/test-model-2"
188-
assert response.models[0]["api_model_type"] == "embedding"
189-
190-
191-
@pytest.mark.asyncio
192-
async def test_models_list_filter_model_type_unknown(
193-
test_config: AppConfig,
194-
mock_llama_stack_client: AsyncMockType,
195-
test_request: Request,
196-
test_auth: AuthTuple,
197-
) -> None:
198-
"""Test that models endpoint returns successful response.
199-
200-
This integration test verifies:
201-
- Model list handler
202-
203-
Parameters:
204-
test_config: Test configuration
205-
mock_llama_stack_client: Mocked Llama Stack client
206-
test_request: FastAPI request
207-
test_auth: noop authentication tuple
208-
"""
209-
_ = test_config
210-
_ = mock_llama_stack_client
211-
212-
response = await models_endpoint_handler(
213-
request=test_request,
214-
auth=test_auth,
215-
model_type=ModelFilter(model_type="foobar"),
216-
)
178+
assert len(response.models) == expected_count
217179

218-
# Verify response structure
219-
assert response is not None
220-
assert len(response.models) == 0
180+
# Verify each expected model
181+
for i, expected_model in enumerate(expected_models):
182+
assert response.models[i]["identifier"] == expected_model["identifier"]
183+
assert response.models[i]["api_model_type"] == expected_model["api_model_type"]
221184

222185

223186
@pytest.mark.asyncio

0 commit comments

Comments
 (0)