|
| 1 | +import json |
| 2 | + |
| 3 | +from dataikuapi.dss.metrics import ComputedMetrics |
1 | 4 | from .discussion import DSSObjectDiscussions |
2 | 5 |
|
3 | 6 | from requests import utils |
@@ -118,7 +121,7 @@ def list_model_evaluations(self, as_type=None): |
118 | 121 | :rtype: list |
119 | 122 | """ |
120 | 123 | items = self.client._perform_json("GET", "/projects/%s/modelevaluationstores/%s/runs/" % (self.project_key, self.mes_id)) |
121 | | - if as_type == "objects" or as_type == "object": |
| 124 | + if as_type in ["objects", "object"]: |
122 | 125 | return [DSSModelEvaluation(self, item["ref"]["runId"]) for item in items] |
123 | 126 | else: |
124 | 127 | return items |
@@ -171,6 +174,53 @@ def create_model_evaluation(self, labels=None, prediction_type=None, model_type= |
171 | 174 | return DSSModelEvaluation(self, run_id) |
172 | 175 |
|
173 | 176 |
|
| 177 | + ######################################################## |
| 178 | + # Metrics |
| 179 | + ######################################################## |
| 180 | + |
def get_last_metric_values(self):
    """
    Get the values of the metrics from the latest build of this model evaluation store.

    :return: a handle on the computed metric values
    :rtype: :class:`dataikuapi.dss.metrics.ComputedMetrics`
    """
    raw_metrics = self.client._perform_json(
        "GET",
        "/projects/%s/modelevaluationstores/%s/metrics/last" % (self.project_key, self.mes_id))
    return ComputedMetrics(raw_metrics)
| 191 | + |
def get_metric_history(self, metric):
    """
    Get the history of the values of a metric on this model evaluation store.

    :param metric: a metric id (string), or a metric definition (dict); a dict
        is serialized to JSON before being used as the lookup value
    :return: an object containing the values of the metric, cast to the
        appropriate type (double, boolean, ...)
    """
    # Strings are passed through unchanged; structured metric definitions are
    # JSON-encoded. The previous check also tested the Python 2 `unicode`
    # builtin, which raised NameError on Python 3 for any non-str input.
    metric_lookup = metric if isinstance(metric, str) else json.dumps(metric)
    return self.client._perform_json(
        "GET", "/projects/%s/modelevaluationstores/%s/metrics/history" % (self.project_key, self.mes_id),
        params={'metricLookup': metric_lookup})
| 203 | + |
def compute_metrics(self, metric_ids=None, probes=None):
    """
    Compute metrics on this model evaluation store.

    If neither ``metric_ids`` nor ``probes`` is given, the metrics configured
    on the model evaluation store are computed.

    :param metric_ids: optional list of metric ids to compute
    :param probes: optional probes definition to compute, used when
        ``metric_ids`` is not given
    :return: the result of the computation, as returned by the backend
    """
    actions_url = "/projects/%s/modelevaluationstores/%s/actions" % (self.project_key, self.mes_id)
    # Explicit metric ids take precedence over a probes definition.
    if metric_ids is not None:
        return self.client._perform_json(
            "POST", "%s/computeMetricsFromIds" % actions_url,
            body={"metricIds": metric_ids})
    if probes is not None:
        return self.client._perform_json(
            "POST", "%s/computeMetrics" % actions_url,
            body=probes)
    return self.client._perform_json("POST", "%s/computeMetrics" % actions_url)
| 221 | + |
| 222 | + |
| 223 | + |
174 | 224 | class DSSModelEvaluationStoreSettings: |
175 | 225 | """ |
176 | 226 | A handle on the settings of a model evaluation store |
@@ -271,6 +321,15 @@ def put_file(self, path, f): |
271 | 321 | "POST", "/projects/%s/modelevaluationstores/%s/runs/%s/contents/%s" % (self.project_key, self.mes_id, self.run_id, utils.quote(path)), |
272 | 322 | "", f) |
273 | 323 |
|
def get_metrics(self):
    """
    Get the metrics recorded for this model evaluation.

    :return: the metrics, as a JSON object
    """
    metrics_url = "/projects/%s/modelevaluationstores/%s/runs/%s/metrics" % (
        self.project_key, self.mes_id, self.run_id)
    return self.client._perform_json("GET", metrics_url)
| 332 | + |
274 | 333 | class DSSModelEvaluationSettings: |
275 | 334 | """ |
276 | 335 | A handle on the settings of a model evaluation |
|
0 commit comments