Skip to content

Commit 9082c32

Browse files
authored
feat: impact metrics (#387)
1 parent 7951d32 commit 9082c32

File tree

8 files changed

+417
-3
lines changed

8 files changed

+417
-3
lines changed

README.md

Lines changed: 50 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -319,6 +319,56 @@ client.is_enabled("testFlag")
319319

320320
```
321321

322+
### Impact metrics
323+
324+
Impact metrics are lightweight, application-level time-series metrics stored and visualized directly inside Unleash. They allow you to connect specific application data, such as request counts, error rates, or latency, to your feature flags and release plans.
325+
326+
These metrics help validate feature impact and automate release processes. For instance, you can monitor usage patterns or performance to determine if a feature meets its goals.
327+
328+
The SDK automatically attaches context labels to metrics: `appName` and `environment`.
329+
330+
#### Counters
331+
332+
Use counters for cumulative values that only increase (total requests, errors):
333+
334+
```python
335+
client.impact_metrics.define_counter(
336+
"request_count",
337+
"Total number of HTTP requests processed"
338+
)
339+
340+
client.impact_metrics.increment_counter("request_count")
341+
```
342+
343+
#### Gauges
344+
345+
Use gauges for point-in-time values that can go up or down:
346+
347+
```python
348+
client.impact_metrics.define_gauge(
349+
"total_users",
350+
"Total number of registered users"
351+
)
352+
353+
client.impact_metrics.update_gauge("total_users", user_count)
354+
```
355+
356+
#### Histograms
357+
358+
Histograms measure value distribution (request duration, response size):
359+
360+
```python
361+
client.impact_metrics.define_histogram(
362+
"request_time_ms",
363+
"Time taken to process a request in milliseconds",
364+
[50, 100, 200, 500, 1000]
365+
)
366+
367+
client.impact_metrics.observe_histogram("request_time_ms", 125)
368+
```
369+
370+
Impact metrics are batched and sent using the same interval as standard SDK metrics.
371+
322372
### Custom cache
323373

324374
By default, the Python SDK stores feature flags in an on-disk cache using fcache. If you need a different storage backend, for example, Redis, memory-only, or a custom database, you can provide your own cache implementation.

UnleashClient/__init__.py

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -35,12 +35,14 @@
3535
SDK_NAME,
3636
SDK_VERSION,
3737
)
38+
from UnleashClient.environment_resolver import extract_environment_from_headers
3839
from UnleashClient.events import (
3940
BaseEvent,
4041
UnleashEvent,
4142
UnleashEventType,
4243
UnleashReadyEvent,
4344
)
45+
from UnleashClient.impact_metrics import ImpactMetrics
4446
from UnleashClient.periodic_tasks import (
4547
aggregate_and_send_metrics,
4648
)
@@ -206,6 +208,15 @@ def __init__(
206208
self.metric_job: Job = None
207209
self.engine = UnleashEngine()
208210

211+
impact_metrics_environment = self.unleash_environment
212+
extracted_env = extract_environment_from_headers(self.unleash_custom_headers)
213+
if extracted_env:
214+
impact_metrics_environment = extracted_env
215+
216+
self.impact_metrics = ImpactMetrics(
217+
self.engine, self.unleash_app_name, impact_metrics_environment
218+
)
219+
209220
self.cache = cache or FileCache(
210221
self.unleash_app_name, directory=cache_directory
211222
)
Lines changed: 26 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,26 @@
1+
from typing import Dict, Optional
2+
3+
4+
def extract_environment_from_headers(
    headers: Optional[Dict[str, str]],
) -> Optional[str]:
    """Derive the environment name embedded in an Unleash API token.

    Unleash API tokens use the ``project:environment.hash`` format and are
    typically supplied via the ``Authorization`` header (matched
    case-insensitively). Returns the ``environment`` segment, or ``None``
    when no token or no environment segment is present.
    """
    if not headers:
        return None

    # Locate the Authorization header regardless of the key's casing,
    # preserving the dict's insertion order (first match wins).
    token: Optional[str] = None
    for header_name, header_value in headers.items():
        if header_name.lower() == "authorization":
            token = header_value
            break

    if not token:
        return None

    # Without the project/environment separator the token carries no
    # environment information.
    if ":" not in token:
        return None

    environment_and_hash = token.split(":", 1)[1]
    environment = environment_and_hash.split(".", 1)[0]
    return environment or None

UnleashClient/impact_metrics.py

Lines changed: 86 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,86 @@
1+
from dataclasses import dataclass, field
2+
from typing import Any, Dict, List, Optional
3+
4+
from yggdrasil_engine.engine import UnleashEngine
5+
6+
7+
@dataclass
class MetricFlagContext:
    """Context for resolving feature flag values as metric labels."""

    # Names of the feature flags whose variant results should be attached
    # as labels to the recorded metric sample.
    flag_names: List[str] = field(default_factory=list)
    # Unleash context dict handed to the engine when resolving each flag's
    # variant (e.g. userId, sessionId) — shape depends on the engine; TODO confirm.
    context: Dict[str, Any] = field(default_factory=dict)
13+
14+
15+
class ImpactMetrics:
    """
    Provides methods to define and record metrics (counters, gauges, histograms)
    with optional feature flag context that gets resolved to labels.

    Every recorded sample is stamped with the base labels ``appName`` and
    ``environment``; a :class:`MetricFlagContext` adds one label per flag,
    valued by that flag's resolved variant.
    """

    def __init__(self, engine: UnleashEngine, app_name: str, environment: str):
        self._engine = engine
        # Labels attached to every sample, merged with flag-derived labels.
        self._base_labels = {"appName": app_name, "environment": environment}

    def define_counter(self, name: str, help_text: str) -> None:
        """Register a cumulative (increase-only) counter metric."""
        self._engine.define_counter(name, help_text)

    def increment_counter(
        self,
        name: str,
        value: int = 1,
        flag_context: Optional[MetricFlagContext] = None,
    ) -> None:
        """Add ``value`` to the counter ``name`` with resolved labels."""
        self._engine.inc_counter(name, value, self._resolve_labels(flag_context))

    def define_gauge(self, name: str, help_text: str) -> None:
        """Register a gauge metric (point-in-time value, may go up or down)."""
        self._engine.define_gauge(name, help_text)

    def update_gauge(
        self,
        name: str,
        value: float,
        flag_context: Optional[MetricFlagContext] = None,
    ) -> None:
        """Set the gauge ``name`` to ``value`` with resolved labels."""
        self._engine.set_gauge(name, value, self._resolve_labels(flag_context))

    def define_histogram(
        self, name: str, help_text: str, buckets: Optional[List[float]] = None
    ) -> None:
        """Register a histogram metric; ``buckets`` are optional upper bounds."""
        self._engine.define_histogram(name, help_text, buckets)

    def observe_histogram(
        self,
        name: str,
        value: float,
        flag_context: Optional[MetricFlagContext] = None,
    ) -> None:
        """Record one observation of ``value`` in histogram ``name``."""
        self._engine.observe_histogram(name, value, self._resolve_labels(flag_context))

    def _variant_label(self, flag_name: str, context: Dict[str, Any]) -> str:
        """Resolve a flag to its label value: variant name, 'enabled', or 'disabled'."""
        variant = self._engine.get_variant(flag_name, context)
        if not variant:
            return "disabled"
        if variant.enabled:
            return variant.name
        # No variant matched, but the flag itself may still be on.
        return "enabled" if variant.feature_enabled else "disabled"

    def _resolve_labels(
        self, flag_context: Optional[MetricFlagContext]
    ) -> Dict[str, str]:
        """Merge the base labels with one label per requested flag."""
        labels = dict(self._base_labels)
        if flag_context:
            for flag_name in flag_context.flag_names:
                labels[flag_name] = self._variant_label(
                    flag_name, flag_context.context
                )
        return labels

UnleashClient/periodic_tasks/send_metrics.py

Lines changed: 15 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,12 @@ def aggregate_and_send_metrics(
2020
) -> None:
2121
metrics_bucket = engine.get_metrics()
2222

23+
try:
24+
impact_metrics = engine.collect_impact_metrics()
25+
except Exception as exc:
26+
LOGGER.warning("Failed to collect impact metrics: %s", exc)
27+
impact_metrics = None
28+
2329
metrics_request = {
2430
"appName": app_name,
2531
"instanceId": instance_id,
@@ -31,7 +37,14 @@ def aggregate_and_send_metrics(
3137
"specVersion": CLIENT_SPEC_VERSION,
3238
}
3339

34-
if metrics_bucket:
35-
send_metrics(url, metrics_request, headers, custom_options, request_timeout)
40+
if impact_metrics:
41+
metrics_request["impactMetrics"] = impact_metrics
42+
43+
if metrics_bucket or impact_metrics:
44+
success = send_metrics(
45+
url, metrics_request, headers, custom_options, request_timeout
46+
)
47+
if not success and impact_metrics:
48+
engine.restore_impact_metrics(impact_metrics)
3649
else:
3750
LOGGER.debug("No feature flags with metrics, skipping metrics submission.")

requirements.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,7 @@ mmhash3
66
python-dateutil
77
requests
88
semver
9-
yggdrasil-engine>=1.0.0
9+
yggdrasil-engine>=1.2.1
1010
launchdarkly-eventsource
1111

1212
# Development packages
Lines changed: 35 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,35 @@
1+
from UnleashClient.environment_resolver import extract_environment_from_headers
2+
3+
4+
def test_valid_headers():
    """A well-formed Authorization token yields its environment segment."""
    headers = {
        "Authorization": "project:environment.hash",
        "Content-Type": "application/json",
    }
    assert extract_environment_from_headers(headers) == "environment"
12+
13+
14+
def test_case_insensitive_header_keys():
    """The Authorization header is matched regardless of key casing."""
    headers = {
        "AUTHORIZATION": "project:environment.hash",
        "Content-Type": "application/json",
    }
    assert extract_environment_from_headers(headers) == "environment"
22+
23+
24+
def test_authorization_header_not_present():
    """No Authorization header means no environment can be extracted."""
    assert extract_environment_from_headers({}) is None
27+
28+
29+
def test_environment_part_is_empty():
    """A token with an empty environment segment resolves to None."""
    headers = {"Authorization": "project:.hash"}
    assert extract_environment_from_headers(headers) is None

0 commit comments

Comments
 (0)