-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathMain_reasoning.py
More file actions
149 lines (128 loc) · 5.26 KB
/
Main_reasoning.py
File metadata and controls
149 lines (128 loc) · 5.26 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
"""
DFOD reasoning with weighted multimodal memory, unified score spaces,
optional metric fusion, and calibration-aware rejection.
"""
from __future__ import annotations
from typing import Any, Dict, List, Tuple
import numpy as np
from dfod_config import normalize_config
from Baseline_Reasoning.calibration import resolve_calibration
from Baseline_Reasoning.formatting import format_results
from Baseline_Reasoning.memory import build_memory_bank
from Baseline_Reasoning.rejection import apply_unknown_rejection
from Baseline_Reasoning.serialization import MemoryBundle, load_memory_bundle
from Baseline_Reasoning.similarity import compute_metric_score_details, fuse_metric_probabilities
def _finalize_scores(
    embeddings: np.ndarray,
    memory_bank: Dict[str, Any],
    config: Dict[str, Any],
) -> Tuple[np.ndarray, Dict[str, Dict[str, np.ndarray]], Dict[str, Any]]:
    """Compute final per-class scores for every embedding row.

    Scores each row of ``embeddings`` against ``memory_bank`` with every
    configured metric, then either fuses the per-metric probabilities
    (when fusion is enabled and more than one metric was computed) or
    keeps the primary metric's class scores as-is.

    Args:
        embeddings: 2D float array of query embeddings, one row per sample.
        memory_bank: Bank built by ``build_memory_bank``; passed through to
            ``compute_metric_score_details``.
        config: Normalized config dict; only the ``"reasoning"`` section is
            read here.

    Returns:
        Tuple of:
        - final per-class scores as float32, rows aligned with ``embeddings``;
        - per-metric detail dicts keyed by metric name (each holding at least
          ``"class_scores"`` and ``"best_mode_indices"``);
        - provenance dict with ``"score_metric"``, ``"primary_metric"`` and
          the primary metric's ``"best_mode_indices"``.

    Raises:
        KeyError: If the configured primary metric was not among the
            computed metrics.
    """
    reasoning_cfg = config["reasoning"]
    details = compute_metric_score_details(
        embeddings=embeddings,
        memory_bank=memory_bank,
        metrics=reasoning_cfg["metrics"],
    )
    primary_metric = str(reasoning_cfg["primary_metric"]).strip().lower()
    # Fail fast with an explicit message; otherwise the fused branch would
    # only KeyError on the provenance lookup *after* fusion already ran.
    if primary_metric not in details:
        raise KeyError(
            f"primary_metric '{primary_metric}' not among computed metrics: "
            f"{sorted(details)}"
        )
    if reasoning_cfg.get("enable_metric_fusion", False) and len(details) > 1:
        final_scores = fuse_metric_probabilities(
            metric_details=details,
            metric_weights=reasoning_cfg.get("metric_weights", {}),
            temperature=float(reasoning_cfg.get("softmax_temperature", 1.0)),
        )
        score_metric = "fused"
    else:
        final_scores = details[primary_metric]["class_scores"]
        score_metric = primary_metric
    provenance = {
        "score_metric": score_metric,
        "primary_metric": primary_metric,
        # Mode provenance always follows the primary metric, even when fused.
        "best_mode_indices": details[primary_metric]["best_mode_indices"],
    }
    return final_scores.astype(np.float32, copy=False), details, provenance
def run_reasoning(
    embeddings: np.ndarray,
    memory_path: str,
    config: Dict[str, object] | None = None,
) -> List[Dict[str, object]]:
    """Load the memory bundle at *memory_path*, run reasoning on
    *embeddings*, and return only the formatted results."""
    detailed = run_reasoning_detailed(
        embeddings=embeddings,
        memory_path=memory_path,
        config=config,
    )
    # run_reasoning_detailed returns (formatted, decisions, scores, names);
    # this convenience wrapper exposes just the formatted output.
    return detailed[0]
def _run_reasoning_with_bundle(
    embeddings: np.ndarray,
    memory_bundle: MemoryBundle,
    config: Dict[str, object] | None = None,
) -> Tuple[List[Dict[str, object]], List[Dict[str, object]], np.ndarray, List[str]]:
    """Core pipeline: score *embeddings* against *memory_bundle*, apply
    calibration-aware unknown rejection, and attach per-row diagnostics.

    Returns ``(formatted, decisions, final_scores, class_names)``; an
    empty embedding batch short-circuits to empty outputs.
    """
    cfg = normalize_config(config)
    emb = np.asarray(embeddings, dtype=np.float32)
    if emb.ndim != 2:
        raise ValueError(f"embeddings must be 2D, got shape={emb.shape}")
    if emb.shape[0] == 0:
        return [], [], np.zeros((0, 0), dtype=np.float32), []

    memory_bank = build_memory_bank(memory_bundle, config=cfg)
    final_scores, metric_details, provenance = _finalize_scores(emb, memory_bank, cfg)
    class_names = list(memory_bank["class_names"])
    calibration = resolve_calibration(config=cfg, memory_bundle=memory_bundle)

    include_diag = bool(cfg["diagnostics"].get("include", True))
    decisions = apply_unknown_rejection(
        scores=final_scores,
        class_names=class_names,
        config=cfg,
        calibration=calibration,
        diagnostics=(
            {
                "score_metric": provenance["score_metric"],
                "primary_metric": provenance["primary_metric"],
            }
            if include_diag
            else None
        ),
    )

    if include_diag and decisions:
        best_modes = provenance["best_mode_indices"]
        mode_ids = memory_bank["mode_ids"]
        for row, decision in enumerate(decisions):
            winner = int(decision.get("top1_idx", -1))
            # Attach mode provenance only when the winning column exists.
            if 0 <= winner < best_modes.shape[1]:
                mode_index = int(best_modes[row, winner])
                decision["mode_index"] = mode_index
                decision["mode_id"] = int(mode_ids[mode_index])
            # Per-metric score of the winning class for this row; -inf when
            # there is no winner.
            per_metric: Dict[str, float] = {}
            for metric, detail in metric_details.items():
                if winner >= 0:
                    per_metric[metric] = float(detail["class_scores"][row, winner])
                else:
                    per_metric[metric] = float("-inf")
            decision["metric_scores"] = per_metric

    formatted = format_results(decisions=decisions)
    return formatted, decisions, final_scores, class_names
def run_reasoning_bundle_detailed(
    embeddings: np.ndarray,
    memory_bundle: MemoryBundle,
    config: Dict[str, object] | None = None,
) -> Tuple[List[Dict[str, object]], List[Dict[str, object]], np.ndarray, List[str]]:
    """Public detailed entry point for an already-loaded memory bundle.

    Thin delegate to the private pipeline; see ``_run_reasoning_with_bundle``
    for the returned tuple's contents.
    """
    return _run_reasoning_with_bundle(embeddings, memory_bundle, config)
def run_reasoning_bundle(
    embeddings: np.ndarray,
    memory_bundle: MemoryBundle,
    config: Dict[str, object] | None = None,
) -> List[Dict[str, object]]:
    """Run reasoning on an already-loaded memory bundle and return only
    the formatted results, discarding the detailed outputs."""
    results, _decisions, _scores, _names = run_reasoning_bundle_detailed(
        embeddings=embeddings,
        memory_bundle=memory_bundle,
        config=config,
    )
    return results
def run_reasoning_detailed(
    embeddings: np.ndarray,
    memory_path: str,
    config: Dict[str, object] | None = None,
) -> Tuple[List[Dict[str, object]], List[Dict[str, object]], np.ndarray, List[str]]:
    """Load the memory bundle from *memory_path* and run the detailed
    reasoning pipeline on *embeddings*.

    Returns ``(formatted, decisions, final_scores, class_names)``.
    """
    return _run_reasoning_with_bundle(
        embeddings=embeddings,
        memory_bundle=load_memory_bundle(memory_path),
        config=config,
    )