|
122 | 122 | "metadata": {}, |
123 | 123 | "outputs": [], |
124 | 124 | "source": [ |
125 | | - "reference_array = np.array([\n", |
126 | | - " [0,1,0,0,0],\n", |
127 | | - " [1,1,0,0,0],\n", |
128 | | - " [0,0,0,2,0],\n", |
129 | | - " [0,0,2,2,2],\n", |
130 | | - " [0,0,0,2,0],\n", |
131 | | - "])\n", |
| 125 | + "reference_array = np.array(\n", |
| 126 | + " [\n", |
| 127 | + " [0, 1, 0, 0, 0],\n", |
| 128 | + " [1, 1, 0, 0, 0],\n", |
| 129 | + " [0, 0, 0, 2, 0],\n", |
| 130 | + " [0, 0, 2, 2, 2],\n", |
| 131 | + " [0, 0, 0, 2, 0],\n", |
| 132 | + " ]\n", |
| 133 | + ")\n", |
132 | 134 | "\n", |
133 | 135 | "# Let's give them names, in practice this should be sample/subject names for easy recognition\n", |
134 | 136 | "predictions = {\n", |
135 | | - " \"subject_perfect\": np.array([\n", |
136 | | - " [0,1,0,0,0],\n", |
137 | | - " [1,1,0,0,0],\n", |
138 | | - " [0,0,0,2,0],\n", |
139 | | - " [0,0,2,2,2],\n", |
140 | | - " [0,0,0,2,0],\n", |
141 | | - " ]),\n", |
142 | | - " \"subject_horrible\": np.array([\n", |
143 | | - " [0,0,0,0,0],\n", |
144 | | - " [1,0,0,0,0],\n", |
145 | | - " [0,0,0,0,2],\n", |
146 | | - " [0,0,0,0,2],\n", |
147 | | - " [0,0,0,0,2],\n", |
148 | | - " ]),\n", |
149 | | - " \"subject_overprediction\": np.array([\n", |
150 | | - " [0,1,0,0,0],\n", |
151 | | - " [1,1,1,0,0],\n", |
152 | | - " [0,1,2,2,2],\n", |
153 | | - " [0,0,2,2,2],\n", |
154 | | - " [0,0,0,2,2],\n", |
155 | | - " ]),\n", |
| 137 | + " \"subject_perfect\": np.array(\n", |
| 138 | + " [\n", |
| 139 | + " [0, 1, 0, 0, 0],\n", |
| 140 | + " [1, 1, 0, 0, 0],\n", |
| 141 | + " [0, 0, 0, 2, 0],\n", |
| 142 | + " [0, 0, 2, 2, 2],\n", |
| 143 | + " [0, 0, 0, 2, 0],\n", |
| 144 | + " ]\n", |
| 145 | + " ),\n", |
| 146 | + " \"subject_horrible\": np.array(\n", |
| 147 | + " [\n", |
| 148 | + " [0, 0, 0, 0, 0],\n", |
| 149 | + " [1, 0, 0, 0, 0],\n", |
| 150 | + " [0, 0, 0, 0, 2],\n", |
| 151 | + " [0, 0, 0, 0, 2],\n", |
| 152 | + " [0, 0, 0, 0, 2],\n", |
| 153 | + " ]\n", |
| 154 | + " ),\n", |
| 155 | + " \"subject_overprediction\": np.array(\n", |
| 156 | + " [\n", |
| 157 | + " [0, 1, 0, 0, 0],\n", |
| 158 | + " [1, 1, 1, 0, 0],\n", |
| 159 | + " [0, 1, 2, 2, 2],\n", |
| 160 | + " [0, 0, 2, 2, 2],\n", |
| 161 | + " [0, 0, 0, 2, 2],\n", |
| 162 | + " ]\n", |
| 163 | + " ),\n", |
156 | 164 | "}" |
157 | 165 | ] |
158 | 166 | }, |
|
182 | 190 | "source": [ |
183 | 191 | "# let's calculate one sample as usual\n", |
184 | 192 | "\n", |
185 | | - "# Let's define that label 1 and 2 (see arrays above) should be treated as different groups \n", |
| 193 | + "# Let's define that label 1 and 2 (see arrays above) should be treated as different groups\n", |
186 | 194 | "from panoptica.utils import SegmentationClassGroups, LabelGroup\n", |
| 195 | + "\n", |
187 | 196 | "# (in practice, this could be different classes of labels, instead of multiple instances of the same class)\n", |
188 | | - "segmentation_class_groups = SegmentationClassGroups({\"Structure1\": LabelGroup(1), \"Structure2\": LabelGroup(2)})\n", |
| 197 | + "segmentation_class_groups = SegmentationClassGroups(\n", |
| 198 | + " {\"Structure1\": LabelGroup(1), \"Structure2\": LabelGroup(2)}\n", |
| 199 | + ")\n", |
189 | 200 | "\n", |
190 | 201 | "evaluator = Panoptica_Evaluator(\n", |
191 | 202 | " expected_input=InputType.MATCHED_INSTANCE,\n", |
|
291 | 302 | "import os\n", |
292 | 303 | "\n", |
293 | 304 | "output_file = str(Path(os.path.abspath(\"\")).parent.joinpath(\"example_aggregation.tsv\"))\n", |
294 | | - "aggregator = Panoptica_Aggregator(panoptica_evaluator=evaluator, output_file=output_file, log_times=True,)\n", |
| 305 | + "aggregator = Panoptica_Aggregator(\n", |
| 306 | + " panoptica_evaluator=evaluator,\n", |
| 307 | + " output_file=output_file,\n", |
| 308 | + " log_times=True,\n", |
| 309 | + ")\n", |
295 | 310 | "print(aggregator.evaluation_metrics)" |
296 | 311 | ] |
297 | 312 | }, |
|
486 | 501 | ], |
487 | 502 | "source": [ |
488 | 503 | "# get a summary dictionary across groups and metrics\n", |
489 | | - "statistics_obj.get_summary_dict(include_across_group=False) # we set this to false because we only have on group, otherwise this would yield also the averages across all groups" |
| 504 | + "statistics_obj.get_summary_dict(\n", |
| 505 | + " include_across_group=False\n", |
| 506 | + ") # we set this to false because we only have one group, otherwise this would also yield the averages across all groups" |
490 | 507 | ] |
491 | 508 | }, |
492 | 509 | { |
|
2425 | 2442 | "source": [ |
2426 | 2443 | "# We can also make plots over multiple statistics objects (usually reflecting different algorithms/predictors)\n", |
2427 | 2444 | "from panoptica.panoptica_statistics import make_curve_over_setups\n", |
| 2445 | + "\n", |
2428 | 2446 | "# we simulate this by multiplying our statistics object\n", |
2429 | 2447 | "make_curve_over_setups(\n", |
2430 | | - " statistics_dict = {\n", |
| 2448 | + " statistics_dict={\n", |
2431 | 2449 | " \"algorithm1\": statistics_obj,\n", |
2432 | 2450 | " \"algorithm2\": statistics_obj,\n", |
2433 | 2451 | " \"algorithm3\": statistics_obj,\n", |
2434 | | - " },\n", |
| 2452 | + " },\n", |
2435 | 2453 | " metric=\"global_bin_dsc\",\n", |
2436 | 2454 | ")\n", |
2437 | 2455 | "# of course, as we use the same statistic object multiple times, each pair of bars is identical" |
|
0 commit comments