Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
102 changes: 102 additions & 0 deletions tests/integration/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@ This directory contains integration tests for Lightspeed Core Stack. Integration
- [Test Constants](#test-constants)
- [Writing Integration Tests](#writing-integration-tests)
- [Running Tests](#running-tests)
- [Data-Driven (Parameterized) Tests](#data-driven-parameterized-tests)
- [Best Practices](#best-practices)

## Getting Started
Expand Down Expand Up @@ -252,6 +253,107 @@ uv run make test-integration
uv run pytest tests/integration/ -v --tb=short
```

## Data-Driven (Parameterized) Tests

### Overview

Data-driven tests use `@pytest.mark.parametrize` to run the same test logic with different inputs. This eliminates duplicate code and makes test coverage more visible.

**Benefits:**
- Reduce code duplication
- Add new test cases by simply appending to the data table
- See all test scenarios at a glance
- Consistent structure across similar tests

### When to Use

Use parameterized tests when you have:
- **Multiple similar tests** that differ only in input data and expected output
- **Validation tests** with multiple valid/invalid scenarios
- **Error handling tests** with different error conditions

### Pattern

```python
# Define test cases as a list
TEST_CASES = [
pytest.param(
{
"input": "value1",
"expected_result": "result1",
},
id="descriptive_test_name_1",
),
pytest.param(
{
"input": "value2",
"expected_result": "result2",
},
id="descriptive_test_name_2",
),
]

@pytest.mark.asyncio
@pytest.mark.parametrize("test_case", TEST_CASES)
async def test_example_data_driven(
test_case: dict,
# ... fixtures
) -> None:
"""Data-driven test for example functionality.

Tests multiple scenarios:
- Scenario 1 description
- Scenario 2 description

Parameters:
test_case: Dictionary containing test parameters
# ... other fixtures
"""
input_value = test_case["input"]
expected = test_case["expected_result"]

result = await function_under_test(input_value)

assert result == expected
```

### Best Practices for Parameterized Tests

1. **Use descriptive `id` values** - They appear in test output
```python
pytest.param(..., id="attachment_unknown_type_returns_422") # Good
pytest.param(..., id="test1") # Bad
```

2. **Group related test data** - Keep test cases together at module level
```python
ATTACHMENT_TEST_CASES = [...] # Define near the test that uses it
```

3. **Document all scenarios** - List every scenario in the test's docstring
```python
"""Data-driven test for attachments.

Tests:
- Single attachment
- Empty payload
- Invalid type (422 error)
"""
```

4. **Keep test logic simple** - Use if/else only for success vs. error paths
```python
if expected_status == 200:
# Success assertions
else:
# Error assertions
```

5. **Use consistent dict keys** - Standardize parameter names
```python
{"expected_status": 200, "expected_error": None} # Consistent
```

## Best Practices

### 1. Use Common Fixtures
Expand Down
157 changes: 53 additions & 104 deletions tests/integration/endpoints/test_model_list.py
Original file line number Diff line number Diff line change
Expand Up @@ -86,55 +86,61 @@ def mock_llama_stack_client_failing_fixture(
yield mock_client


@pytest.mark.asyncio
async def test_models_list(
test_config: AppConfig,
mock_llama_stack_client: AsyncMockType,
test_request: Request,
test_auth: AuthTuple,
) -> None:
"""Test that models endpoint returns successful response.

This integration test verifies:
- Model list handler

Parameters:
test_config: Test configuration
mock_llama_stack_client: Mocked Llama Stack client
test_request: FastAPI request
test_auth: noop authentication tuple
"""
_ = test_config
_ = mock_llama_stack_client

response = await models_endpoint_handler(
request=test_request,
auth=test_auth,
model_type=ModelFilter(model_type=None),
)

# Verify response structure
assert response is not None
assert len(response.models) == 2
assert response.models[0]["identifier"] == "test-provider/test-model-1"
assert response.models[0]["api_model_type"] == "llm"
assert response.models[1]["identifier"] == "test-provider/test-model-2"
assert response.models[1]["api_model_type"] == "embedding"
MODEL_FILTER_TEST_CASES = [
pytest.param(
{
"filter_type": None,
"expected_count": 2,
"expected_models": [
{"identifier": "test-provider/test-model-1", "api_model_type": "llm"},
{
"identifier": "test-provider/test-model-2",
"api_model_type": "embedding",
},
],
},
id="no_filter_returns_all_models",
),
pytest.param(
{
"filter_type": "llm",
"expected_count": 1,
"expected_models": [
{"identifier": "test-provider/test-model-1", "api_model_type": "llm"},
],
},
id="filter_llm_returns_llm_model",
),
pytest.param(
{
"filter_type": "foobar",
"expected_count": 0,
"expected_models": [],
},
id="filter_unknown_type_returns_empty",
),
]


@pytest.mark.asyncio
async def test_models_list_filter_model_type_llm(
@pytest.mark.parametrize("test_case", MODEL_FILTER_TEST_CASES)
async def test_models_list_with_filter(
test_case: dict,
test_config: AppConfig,
mock_llama_stack_client: AsyncMockType,
test_request: Request,
test_auth: AuthTuple,
) -> None:
"""Test that models endpoint returns successful response.
"""Tests for models endpoint filtering.

This integration test verifies:
- Model list handler
Tests different model_type filter scenarios:
- No filter (returns all models)
- Filter by llm type
- Filter by unknown type (returns empty)

Parameters:
test_case: Dictionary containing test parameters (filter_type,
expected_count, expected_models)
test_config: Test configuration
mock_llama_stack_client: Mocked Llama Stack client
test_request: FastAPI request
Expand All @@ -143,81 +149,24 @@ async def test_models_list_filter_model_type_llm(
_ = test_config
_ = mock_llama_stack_client

response = await models_endpoint_handler(
request=test_request, auth=test_auth, model_type=ModelFilter(model_type="llm")
)

# Verify response structure
assert response is not None
assert len(response.models) == 1
assert response.models[0]["identifier"] == "test-provider/test-model-1"
assert response.models[0]["api_model_type"] == "llm"


@pytest.mark.asyncio
async def test_models_list_filter_model_type_embedding(
test_config: AppConfig,
mock_llama_stack_client: AsyncMockType,
test_request: Request,
test_auth: AuthTuple,
) -> None:
"""Test that models endpoint returns successful response.

This integration test verifies:
- Model list handler

Parameters:
test_config: Test configuration
mock_llama_stack_client: Mocked Llama Stack client
test_request: FastAPI request
test_auth: noop authentication tuple
"""
_ = test_config
_ = mock_llama_stack_client
filter_type = test_case["filter_type"]
expected_count = test_case["expected_count"]
expected_models = test_case["expected_models"]

response = await models_endpoint_handler(
request=test_request,
auth=test_auth,
model_type=ModelFilter(model_type="embedding"),
model_type=ModelFilter(model_type=filter_type),
)

# Verify response structure
assert response is not None
assert len(response.models) == 1
assert response.models[0]["identifier"] == "test-provider/test-model-2"
assert response.models[0]["api_model_type"] == "embedding"


@pytest.mark.asyncio
async def test_models_list_filter_model_type_unknown(
test_config: AppConfig,
mock_llama_stack_client: AsyncMockType,
test_request: Request,
test_auth: AuthTuple,
) -> None:
"""Test that models endpoint returns successful response.

This integration test verifies:
- Model list handler

Parameters:
test_config: Test configuration
mock_llama_stack_client: Mocked Llama Stack client
test_request: FastAPI request
test_auth: noop authentication tuple
"""
_ = test_config
_ = mock_llama_stack_client

response = await models_endpoint_handler(
request=test_request,
auth=test_auth,
model_type=ModelFilter(model_type="foobar"),
)
assert len(response.models) == expected_count

# Verify response structure
assert response is not None
assert len(response.models) == 0
# Verify each expected model
for i, expected_model in enumerate(expected_models):
assert response.models[i]["identifier"] == expected_model["identifier"]
assert response.models[i]["api_model_type"] == expected_model["api_model_type"]


@pytest.mark.asyncio
Expand Down
Loading
Loading