Skip to content

Commit 9aebb07

Browse files
committed
test(rlsapi): add initial e2e test for v1 infer endpoint
Add a basic smoke test for the RLSAPI v1 infer endpoint to verify the endpoint is reachable and returns a valid JSON response.

Signed-off-by: Major Hayden <major@redhat.com>
1 parent ade5b5d commit 9aebb07

File tree

10 files changed

+117
-7
lines changed

10 files changed

+117
-7
lines changed

.github/workflows/e2e_tests.yaml

Lines changed: 29 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -135,9 +135,9 @@ jobs:
135135
run: |
136136
CONFIGS_DIR="tests/e2e/configs"
137137
ENVIRONMENT="$CONFIG_ENVIRONMENT"
138-
138+
139139
echo "Looking for configurations in $CONFIGS_DIR/"
140-
140+
141141
# List available configurations
142142
if [ -d "$CONFIGS_DIR" ]; then
143143
echo "Available configurations:"
@@ -146,12 +146,12 @@ jobs:
146146
echo "Configs directory '$CONFIGS_DIR' not found!"
147147
exit 1
148148
fi
149-
149+
150150
# Determine which config file to use
151151
CONFIG_FILE="$CONFIGS_DIR/run-$ENVIRONMENT.yaml"
152-
152+
153153
echo "Looking for: $CONFIG_FILE"
154-
154+
155155
if [ -f "$CONFIG_FILE" ]; then
156156
echo "✅ Found config for environment: $ENVIRONMENT"
157157
cp "$CONFIG_FILE" run.yaml
@@ -163,6 +163,30 @@ jobs:
163163
exit 1
164164
fi
165165
166+
- name: Set default model for rlsapi v1 tests
167+
run: |
168+
# Set default model/provider for rlsapi v1 endpoint based on environment
169+
case "${{ matrix.environment }}" in
170+
ci)
171+
echo "E2E_DEFAULT_PROVIDER=openai" >> $GITHUB_ENV
172+
echo "E2E_DEFAULT_MODEL=gpt-4o-mini" >> $GITHUB_ENV
173+
;;
174+
azure)
175+
echo "E2E_DEFAULT_PROVIDER=azure" >> $GITHUB_ENV
176+
echo "E2E_DEFAULT_MODEL=gpt-4o-mini" >> $GITHUB_ENV
177+
;;
178+
vertexai)
179+
echo "E2E_DEFAULT_PROVIDER=google-vertex" >> $GITHUB_ENV
180+
echo "E2E_DEFAULT_MODEL=gemini-2.0-flash-exp" >> $GITHUB_ENV
181+
;;
182+
*)
183+
echo "⚠️ Unknown environment: ${{ matrix.environment }}, using defaults"
184+
echo "E2E_DEFAULT_PROVIDER=openai" >> $GITHUB_ENV
185+
echo "E2E_DEFAULT_MODEL=gpt-4o-mini" >> $GITHUB_ENV
186+
;;
187+
esac
188+
echo "✅ Set E2E_DEFAULT_PROVIDER=${E2E_DEFAULT_PROVIDER} and E2E_DEFAULT_MODEL=${E2E_DEFAULT_MODEL}"
189+
166190
- name: Show final configuration
167191
run: |
168192
echo "=== Configuration Summary ==="

docker-compose-library.yaml

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,9 @@ services:
1919
# OpenAI
2020
- OPENAI_API_KEY=${OPENAI_API_KEY}
2121
- E2E_OPENAI_MODEL=${E2E_OPENAI_MODEL:-gpt-4-turbo}
22+
# Default model for rlsapi v1 tests
23+
- E2E_DEFAULT_PROVIDER=${E2E_DEFAULT_PROVIDER:-openai}
24+
- E2E_DEFAULT_MODEL=${E2E_DEFAULT_MODEL:-gpt-4o-mini}
2225
# Azure
2326
- AZURE_API_KEY=${AZURE_API_KEY:-}
2427
# RHAIIS

docker-compose.yaml

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -55,6 +55,9 @@ services:
5555
environment:
5656
- OPENAI_API_KEY=${OPENAI_API_KEY}
5757
- AZURE_API_KEY=${AZURE_API_KEY}
58+
# Default model for rlsapi v1 tests
59+
- E2E_DEFAULT_PROVIDER=${E2E_DEFAULT_PROVIDER:-openai}
60+
- E2E_DEFAULT_MODEL=${E2E_DEFAULT_MODEL:-gpt-4o-mini}
5861
depends_on:
5962
llama-stack:
6063
condition: service_healthy
Lines changed: 24 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,24 @@
1+
name: Lightspeed Core Service (LCS)
2+
service:
3+
host: 0.0.0.0
4+
port: 8080
5+
auth_enabled: false
6+
workers: 1
7+
color_log: true
8+
access_log: true
9+
llama_stack:
10+
# Library mode - embeds llama-stack as library
11+
use_as_library_client: true
12+
library_client_config_path: run.yaml
13+
user_data_collection:
14+
feedback_enabled: true
15+
feedback_storage: "/tmp/data/feedback"
16+
transcripts_enabled: true
17+
transcripts_storage: "/tmp/data/transcripts"
18+
authentication:
19+
module: "noop"
20+
inference:
21+
# Configure default model/provider for rlsapi v1 endpoint
22+
# These are set per-environment in the CI workflow
23+
default_provider: ${env.E2E_DEFAULT_PROVIDER:=openai}
24+
default_model: ${env.E2E_DEFAULT_MODEL:=gpt-4o-mini}

tests/e2e/configuration/library-mode/lightspeed-stack.yaml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -16,4 +16,4 @@ user_data_collection:
1616
transcripts_enabled: true
1717
transcripts_storage: "/tmp/data/transcripts"
1818
authentication:
19-
module: "noop"
19+
module: "noop"
Lines changed: 25 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,25 @@
1+
name: Lightspeed Core Service (LCS)
2+
service:
3+
host: 0.0.0.0
4+
port: 8080
5+
auth_enabled: false
6+
workers: 1
7+
color_log: true
8+
access_log: true
9+
llama_stack:
10+
# Server mode - connects to separate llama-stack service
11+
use_as_library_client: false
12+
url: http://llama-stack:8321
13+
api_key: xyzzy
14+
user_data_collection:
15+
feedback_enabled: true
16+
feedback_storage: "/tmp/data/feedback"
17+
transcripts_enabled: true
18+
transcripts_storage: "/tmp/data/transcripts"
19+
authentication:
20+
module: "noop"
21+
inference:
22+
# Configure default model/provider for rlsapi v1 endpoint
23+
# These are set per-environment in the CI workflow
24+
default_provider: ${env.E2E_DEFAULT_PROVIDER:=openai}
25+
default_model: ${env.E2E_DEFAULT_MODEL:=gpt-4o-mini}

tests/e2e/configuration/server-mode/lightspeed-stack.yaml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -17,4 +17,4 @@ user_data_collection:
1717
transcripts_enabled: true
1818
transcripts_storage: "/tmp/data/transcripts"
1919
authentication:
20-
module: "noop"
20+
module: "noop"

tests/e2e/features/environment.py

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -171,6 +171,15 @@ def before_feature(context: Context, feature: Feature) -> None:
171171
switch_config(context.feature_config)
172172
restart_container("lightspeed-stack")
173173

174+
if "RlsapiConfig" in feature.tags:
175+
mode_dir = "library-mode" if context.is_library_mode else "server-mode"
176+
context.feature_config = (
177+
f"tests/e2e/configuration/{mode_dir}/lightspeed-stack-rlsapi.yaml"
178+
)
179+
context.default_config_backup = create_config_backup("lightspeed-stack.yaml")
180+
switch_config(context.feature_config)
181+
restart_container("lightspeed-stack")
182+
174183
if "Feedback" in feature.tags:
175184
context.hostname = os.getenv("E2E_LSC_HOSTNAME", "localhost")
176185
context.port = os.getenv("E2E_LSC_PORT", "8080")
@@ -184,6 +193,11 @@ def after_feature(context: Context, feature: Feature) -> None:
184193
restart_container("lightspeed-stack")
185194
remove_config_backup(context.default_config_backup)
186195

196+
if "RlsapiConfig" in feature.tags:
197+
switch_config(context.default_config_backup)
198+
restart_container("lightspeed-stack")
199+
remove_config_backup(context.default_config_backup)
200+
187201
if "Feedback" in feature.tags:
188202
for conversation_id in context.feedback_conversations:
189203
url = f"http://{context.hostname}:{context.port}/v1/conversations/{conversation_id}"
Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,16 @@
1+
@RlsapiConfig
2+
Feature: RLSAPI v1 infer endpoint
3+
Basic tests for the RLSAPI v1 inference endpoint.
4+
5+
Background:
6+
Given The service is started locally
7+
And REST API service prefix is /v1
8+
9+
Scenario: Verify RLSAPI v1 infer endpoint returns 200
10+
Given The system is in default state
11+
When I access REST API endpoint "infer" using HTTP POST method
12+
"""
13+
{"question": "Say hello"}
14+
"""
15+
Then The status code of the response is 200
16+
And Content type of response should be set to "application/json"

tests/e2e/test_list.txt

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -9,3 +9,4 @@ features/info.feature
99
features/query.feature
1010
features/streaming_query.feature
1111
features/rest_api.feature
12+
features/rlsapi_v1.feature

0 commit comments

Comments (0)