from ..utils import DataikuException
from ..utils import DataikuUTF8CSVReader
from ..utils import DataikuStreamedHttpUTF8CSVReader
from .future import DSSFuture
import json, warnings
from .utils import DSSTaggableObjectListItem
from .metrics import ComputedMetrics
from .discussion import DSSObjectDiscussions
from .statistics import DSSStatisticsWorksheet
from . import recipe


class DSSDatasetListItem(DSSTaggableObjectListItem):
    """An item in a list of datasets. Do not instantiate this class."""
    def __init__(self, client, data):
        super(DSSDatasetListItem, self).__init__(data)
        self.client = client

    def to_dataset(self):
        """Gets the :class:`DSSDataset` corresponding to this dataset"""
        return DSSDataset(self.client, self._data["projectKey"], self._data["name"])

    @property
    def name(self):
        """Name of the dataset"""
        return self._data["name"]

    @property
    def id(self):
        """Identifier of the dataset (same as its name)"""
        return self._data["name"]

    @property
    def type(self):
        """Type of the dataset"""
        return self._data["type"]

    @property
    def schema(self):
        """Schema of the dataset, as a dict with a list of columns"""
        return self._data["schema"]

    @property
    def connection(self):
        """Returns the connection on which this dataset is attached, or None if there is no connection for this dataset"""
        if "params" not in self._data:
            return None
        return self._data["params"].get("connection", None)

    def get_column(self, column):
        """
        Returns the schema column given a name.

        :param str column: Column to find
        :returns: a dict of the column settings, or None if the column does not exist
        """
        matched = [col for col in self.schema["columns"] if col["name"] == column]
        return None if len(matched) == 0 else matched[0]
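
# A minimal usage sketch for the list-item API above. The instance URL, API key,
# project key and column name are placeholders, and this assumes a reachable DSS
# instance whose project.list_datasets() returns list items:
#
#   import dataikuapi
#   client = dataikuapi.DSSClient("https://dss.example.com:11200", "YOUR_API_KEY")
#   project = client.get_project("MYPROJECT")
#   for item in project.list_datasets():
#       print(item.name, item.type, item.connection)
#       if item.get_column("customer_id") is not None:
#           dataset = item.to_dataset()  # full handle for further operations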


class DSSDataset(object):
    """
    A dataset on the DSS instance
    """
    def __init__(self, client, project_key, dataset_name):
        self.client = client
        self.project = client.get_project(project_key)
        self.project_key = project_key
        self.dataset_name = dataset_name

    ########################################################
    # Dataset deletion
    ########################################################

    def delete(self, drop_data=False):
        """
        Delete the dataset

        :param bool drop_data: Should the data of the dataset be dropped
        """
        return self.client._perform_empty(
            "DELETE", "/projects/%s/datasets/%s" % (self.project_key, self.dataset_name), params = {
                "dropData" : drop_data
            })

    ########################################################
    # Dataset definition
    ########################################################

    def get_settings(self):
        """
        Returns the settings of this dataset as a :class:`DSSDatasetSettings`, or one of its subclasses.

        Known subclasses of :class:`DSSDatasetSettings` include :class:`FSLikeDatasetSettings`
        and :class:`SQLDatasetSettings`

        You must use :meth:`~DSSDatasetSettings.save()` on the returned object to make your changes effective
        on the dataset.

        .. code-block:: python

            # Example: activating discrete partitioning on a SQL dataset
            dataset = project.get_dataset("my_database_table")
            settings = dataset.get_settings()
            settings.add_discrete_partitioning_dimension("country")
            settings.save()

        :rtype: :class:`DSSDatasetSettings`
        """
        data = self.client._perform_json("GET", "/projects/%s/datasets/%s" % (self.project_key, self.dataset_name))

        if data["type"] in self.__class__.FS_TYPES:
            return FSLikeDatasetSettings(self, data)
        elif data["type"] in self.__class__.SQL_TYPES:
            return SQLDatasetSettings(self, data)
        else:
            return DSSDatasetSettings(self, data)

    def get_definition(self):
        """
        Deprecated. Use :meth:`get_settings`

        Get the raw settings of the dataset as a dict

        :rtype: dict
        """
        warnings.warn("Dataset.get_definition is deprecated, please use get_settings", DeprecationWarning)
        return self.client._perform_json(
            "GET", "/projects/%s/datasets/%s" % (self.project_key, self.dataset_name))

    def set_definition(self, definition):
        """
        Deprecated. Use :meth:`get_settings` and :meth:`DSSDatasetSettings.save`

        Set the definition of the dataset

        :param definition: the definition, as a dict. You should only set a definition object
            that has been retrieved using the :meth:`get_definition` call.
        """
        warnings.warn("Dataset.set_definition is deprecated, please use get_settings", DeprecationWarning)
        return self.client._perform_json(
            "PUT", "/projects/%s/datasets/%s" % (self.project_key, self.dataset_name),
            body=definition)

    def exists(self):
        """Returns whether this dataset exists"""
        try:
            self.get_metadata()
            return True
        except Exception:
            return False

    ########################################################
    # Dataset metadata
    ########################################################

    def get_schema(self):
        """
        Get the schema of the dataset

        Returns:
            a JSON object of the schema, with the list of columns
        """
        return self.client._perform_json(
            "GET", "/projects/%s/datasets/%s/schema" % (self.project_key, self.dataset_name))

    def set_schema(self, schema):
        """
        Set the schema of the dataset

        Args:
            schema: the desired schema for the dataset, as a JSON object. All columns have to provide their
                name and type
        """
        return self.client._perform_json(
            "PUT", "/projects/%s/datasets/%s/schema" % (self.project_key, self.dataset_name),
            body=schema)
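
    # A minimal sketch of a schema round-trip with the two methods above. The
    # dataset handle and column name are hypothetical; note that set_schema
    # expects the full schema object, not just the new column:
    #
    #   schema = dataset.get_schema()
    #   schema["columns"].append({"name": "discount_rate", "type": "double"})
    #   dataset.set_schema(schema)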

    def get_metadata(self):
        """
        Get the metadata attached to this dataset. The metadata contains label, description,
        checklists, tags and custom metadata of the dataset

        Returns:
            a dict object. For more information on available metadata, please see
            https://doc.dataiku.com/dss/api/5.0/rest/
        """
        return self.client._perform_json(
            "GET", "/projects/%s/datasets/%s/metadata" % (self.project_key, self.dataset_name))

    def set_metadata(self, metadata):
        """
        Set the metadata on this dataset.

        Args:
            metadata: the new state of the metadata for the dataset. You should only set a metadata object
                that has been retrieved using the :meth:`get_metadata` call.
        """
        return self.client._perform_json(
            "PUT", "/projects/%s/datasets/%s/metadata" % (self.project_key, self.dataset_name),
            body=metadata)

    ########################################################
    # Dataset data
    ########################################################

    def iter_rows(self, partitions=None):
        """
        Get the dataset's data

        :param partitions: (optional) partition identifiers to read from, if the dataset is partitioned

        Returns:
            an iterator over the rows, each row being a tuple of values. The order of values
            in the tuples is the same as the order of columns in the schema returned by :meth:`get_schema`
        """
        csv_stream = self.client._perform_raw(
            "GET", "/projects/%s/datasets/%s/data/" % (self.project_key, self.dataset_name),
            params = {
                "format" : "tsv-excel-noheader",
                "partitions" : partitions
            })
        return DataikuStreamedHttpUTF8CSVReader(self.get_schema()["columns"], csv_stream).iter_rows()
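
    # Streaming read sketch: rows come back as tuples ordered like the schema
    # columns. "mydataset" is a placeholder name:
    #
    #   dataset = project.get_dataset("mydataset")
    #   columns = [c["name"] for c in dataset.get_schema()["columns"]]
    #   for row in dataset.iter_rows():
    #       record = dict(zip(columns, row))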

    def list_partitions(self):
        """
        Get the list of all partitions of this dataset

        Returns:
            the list of partitions, as a list of strings
        """
        return self.client._perform_json(
            "GET", "/projects/%s/datasets/%s/partitions" % (self.project_key, self.dataset_name))

    def clear(self, partitions=None):
        """
        Clear all data in this dataset

        Args:
            partitions: (optional) a list of partitions to clear. When not provided, the entire dataset
                is cleared
        """
        return self.client._perform_json(
            "DELETE", "/projects/%s/datasets/%s/data" % (self.project_key, self.dataset_name),
            params={"partitions" : partitions})

    def copy_to(self, target, sync_schema=True, write_mode="OVERWRITE"):
        """
        Copies the data of this dataset to another dataset

        :param target: a :class:`dataikuapi.dss.dataset.DSSDataset` representing the target of this copy
        :param bool sync_schema: whether to synchronize the schema of the target with this dataset's schema
        :param str write_mode: the write mode (defaults to OVERWRITE)
        :returns: a :class:`dataikuapi.dss.future.DSSFuture` representing the operation
        """
        dqr = {
            "targetProjectKey" : target.project_key,
            "targetDatasetName": target.dataset_name,
            "syncSchema": sync_schema,
            "writeMode" : write_mode
        }
        future_resp = self.client._perform_json("POST", "/projects/%s/datasets/%s/actions/copyTo" % (self.project_key, self.dataset_name), body=dqr)
        return DSSFuture(self.client, future_resp.get("jobId", None), future_resp)
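
    # Copy sketch: copy_to returns a DSSFuture, so wait on it to block until the
    # copy finishes. The dataset names are placeholders:
    #
    #   source = project.get_dataset("orders_raw")
    #   target = project.get_dataset("orders_backup")
    #   future = source.copy_to(target)
    #   future.wait_for_result()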

    ########################################################
    # Dataset actions
    ########################################################

    def build(self, job_type="NON_RECURSIVE_FORCED_BUILD", partitions=None, wait=True, no_fail=False):
        """
        Starts a new job to build this dataset and waits for it to complete.
        Raises if the job failed.

        .. code-block:: python

            job = dataset.build()
            print("Job %s done" % job.id)

        :param job_type: The job type. One of RECURSIVE_BUILD, NON_RECURSIVE_FORCED_BUILD or RECURSIVE_FORCED_BUILD
        :param partitions: If the dataset is partitioned, a list of partition ids to build
        :param wait: if True (the default), waits for the job to complete before returning
        :param no_fail: if True, does not raise if the job failed
        :return: the :class:`dataikuapi.dss.job.DSSJob` job handle corresponding to the built job
        :rtype: :class:`dataikuapi.dss.job.DSSJob`
        """
        jd = self.project.new_job(job_type)
        jd.with_output(self.dataset_name, partition=partitions)
        if wait:
            return jd.start_and_wait(no_fail=no_fail)
        else:
            return jd.start()

    def synchronize_hive_metastore(self):
        """
        Synchronize this dataset with the Hive metastore
        """
        self.client._perform_empty(
            "POST", "/projects/%s/datasets/%s/actions/synchronizeHiveMetastore" % (self.project_key, self.dataset_name))

    def update_from_hive(self):
        """
        Resynchronize this dataset from its Hive definition
        """
        self.client._perform_empty(
            "POST", "/projects/%s/datasets/%s/actions/updateFromHive" % (self.project_key, self.dataset_name))

    def compute_metrics(self, partition='', metric_ids=None, probes=None):
        """
        Compute metrics on a partition of this dataset.

        If neither metric ids nor a custom probes set are specified, the metrics
        setup on the dataset are used.
        """
        url = "/projects/%s/datasets/%s/actions" % (self.project_key, self.dataset_name)
        if metric_ids is not None:
            return self.client._perform_json(
                "POST", "%s/computeMetricsFromIds" % url,
                params={'partition': partition}, body={"metricIds": metric_ids})
        elif probes is not None:
            return self.client._perform_json(
                "POST", "%s/computeMetrics" % url,
                params={'partition': partition}, body=probes)
        else:
            return self.client._perform_json(
                "POST", "%s/computeMetrics" % url,
                params={'partition': partition})
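
    # Metrics sketch: compute the configured metrics, then read the freshest
    # values back with get_last_metric_values() (defined further down). This
    # assumes the ComputedMetrics helpers get_all_ids/get_global_value:
    #
    #   dataset.compute_metrics()
    #   last = dataset.get_last_metric_values()
    #   for metric_id in last.get_all_ids():
    #       print(metric_id, last.get_global_value(metric_id))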

    def run_checks(self, partition='', checks=None):
        """
        Run checks on a partition of this dataset. If the checks are not specified, the checks
        setup on the dataset are used.
        """
        if checks is None:
            return self.client._perform_json(
                "POST", "/projects/%s/datasets/%s/actions/runChecks" % (self.project_key, self.dataset_name),
                params={'partition': partition})
        else:
            return self.client._perform_json(
                "POST", "/projects/%s/datasets/%s/actions/runChecks" % (self.project_key, self.dataset_name),
                params={'partition': partition}, body=checks)

    def uploaded_add_file(self, fp, filename):
        """
        Adds a file to an "uploaded files" dataset

        :param file fp: A file-like object that represents the file to upload
        :param str filename: The filename for the file to upload
        """
        self.client._perform_empty("POST", "/projects/%s/datasets/%s/uploaded/files" % (self.project_key, self.dataset_name),
            files={"file": (filename, fp)})

    def uploaded_list_files(self):
        """
        List the files in an "uploaded files" dataset
        """
        return self.client._perform_json("GET", "/projects/%s/datasets/%s/uploaded/files" % (self.project_key, self.dataset_name))

    ########################################################
    # Lab and ML
    # Don't forget to synchronize with DSSProject.*
    ########################################################

    def create_prediction_ml_task(self, target_variable,
                                  ml_backend_type="PY_MEMORY",
                                  guess_policy="DEFAULT",
                                  prediction_type=None,
                                  wait_guess_complete=True):
        """Creates a new prediction task in a new visual analysis lab
        for this dataset.

        :param string target_variable: the variable to predict
        :param string ml_backend_type: ML backend to use, one of PY_MEMORY, MLLIB or H2O
        :param string guess_policy: Policy to use for setting the default parameters. Valid values are: DEFAULT, SIMPLE_FORMULA, DECISION_TREE, EXPLANATORY and PERFORMANCE
        :param string prediction_type: The type of prediction problem this is. If not provided the prediction type will be guessed. Valid values are: BINARY_CLASSIFICATION, REGRESSION, MULTICLASS
        :param boolean wait_guess_complete: if False, the returned ML task will be in 'guessing' state, i.e. analyzing the input dataset to determine feature handling and algorithms.
            You should wait for the guessing to be completed by calling
            ``wait_guess_complete`` on the returned object before doing anything
            else (in particular calling ``train`` or ``get_settings``)
        """
        return self.project.create_prediction_ml_task(self.dataset_name,
            target_variable=target_variable, ml_backend_type=ml_backend_type,
            guess_policy=guess_policy, prediction_type=prediction_type, wait_guess_complete=wait_guess_complete)
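
    # ML task sketch: create the task, wait for the settings guessing to finish,
    # then train. "churn" is a placeholder target column:
    #
    #   mltask = dataset.create_prediction_ml_task("churn", wait_guess_complete=False)
    #   mltask.wait_guess_complete()
    #   mltask.start_train()
    #   mltask.wait_train_complete()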

    def create_clustering_ml_task(self,
                                  ml_backend_type="PY_MEMORY",
                                  guess_policy="KMEANS"):
        """Creates a new clustering task in a new visual analysis lab
        for this dataset.

        The returned ML task will be in 'guessing' state, i.e. analyzing
        the input dataset to determine feature handling and algorithms.

        You should wait for the guessing to be completed by calling
        ``wait_guess_complete`` on the returned object before doing anything
        else (in particular calling ``train`` or ``get_settings``)

        :param string ml_backend_type: ML backend to use, one of PY_MEMORY, MLLIB or H2O
        :param string guess_policy: Policy to use for setting the default parameters. Valid values are: KMEANS and ANOMALY_DETECTION
        """
        return self.project.create_clustering_ml_task(self.dataset_name,
            ml_backend_type=ml_backend_type, guess_policy=guess_policy)

    def create_analysis(self):
        """
        Creates a new visual analysis lab for this dataset
        """
        return self.project.create_analysis(self.dataset_name)

    def list_analyses(self):
        """
        List the visual analyses on this dataset

        :returns: a list of dicts
        """
        analysis_list = self.project.list_analyses()
        return [desc for desc in analysis_list if self.dataset_name == desc.get('inputDataset')]

    ########################################################
    # Statistics worksheets
    ########################################################

    def list_statistics_worksheets(self, as_objects=True):
        """
        List the statistics worksheets associated with this dataset.

        :rtype: list of :class:`dataikuapi.dss.statistics.DSSStatisticsWorksheet`
        """
        worksheets = self.client._perform_json(
            "GET", "/projects/%s/datasets/%s/statistics/worksheets/" % (self.project_key, self.dataset_name))
        if as_objects:
            return [self.get_statistics_worksheet(worksheet['id']) for worksheet in worksheets]
        else:
            return worksheets

    def create_statistics_worksheet(self, name="My worksheet"):
        """
        Create a new worksheet in the dataset, and return a handle to interact with it.

        :param string name: name of the worksheet

        Returns:
            A :class:`dataikuapi.dss.statistics.DSSStatisticsWorksheet` worksheet handle
        """
        worksheet_definition = {
            "projectKey": self.project_key,
            "name": name,
            "dataSpec": {
                "inputDatasetSmartName": self.dataset_name,
                "datasetSelection": {
                    "partitionSelectionMethod": "ALL",
                    "maxRecords": 30000,
                    "samplingMethod": "FULL"
                }
            }
        }
        created_worksheet = self.client._perform_json(
            "POST", "/projects/%s/datasets/%s/statistics/worksheets/" % (self.project_key, self.dataset_name),
            body=worksheet_definition
        )
        return self.get_statistics_worksheet(created_worksheet['id'])

    def get_statistics_worksheet(self, worksheet_id):
        """
        Get a handle to interact with a statistics worksheet

        :param string worksheet_id: the ID of the desired worksheet
        :returns: A :class:`dataikuapi.dss.statistics.DSSStatisticsWorksheet` worksheet handle
        """
        return DSSStatisticsWorksheet(self.client, self.project_key, self.dataset_name, worksheet_id)

    ########################################################
    # Metrics
    ########################################################

    def get_last_metric_values(self, partition=''):
        """
        Get the last values of the metrics on this dataset

        Returns:
            a list of metric objects and their value
        """
        return ComputedMetrics(self.client._perform_json(
            "GET", "/projects/%s/datasets/%s/metrics/last/%s" % (self.project_key, self.dataset_name, 'NP' if len(partition) == 0 else partition)))

    def get_metric_history(self, metric, partition=''):
        """
        Get the history of the values of a metric on this dataset

        Returns:
            an object containing the values of the metric, cast to the appropriate type (double, boolean, ...)
        """
        return self.client._perform_json(
            "GET", "/projects/%s/datasets/%s/metrics/history/%s" % (self.project_key, self.dataset_name, 'NP' if len(partition) == 0 else partition),
            params={'metricLookup': metric if isinstance(metric, str) else json.dumps(metric)})

    ########################################################
    # Usages
    ########################################################

    def get_usages(self):
        """
        Get the recipes or analyses referencing this dataset

        Returns:
            a list of usages
        """
        return self.client._perform_json("GET", "/projects/%s/datasets/%s/usages" % (self.project_key, self.dataset_name))

    ########################################################
    # Discussions
    ########################################################

    def get_object_discussions(self):
        """
        Get a handle to manage discussions on the dataset

        :returns: the handle to manage discussions
        :rtype: :class:`dataikuapi.dss.discussion.DSSObjectDiscussions`
        """
        return DSSObjectDiscussions(self.client, self.project_key, "DATASET", self.dataset_name)

    ########################################################
    # Test / Autofill
    ########################################################

    FS_TYPES = ["Filesystem", "UploadedFiles", "FilesInFolder",
                "HDFS", "S3", "Azure", "GCS", "FTP", "SCP", "SFTP"]
    # HTTP is FSLike but not FS

    SQL_TYPES = ["JDBC", "PostgreSQL", "MySQL", "Vertica", "Snowflake", "Redshift",
                 "Greenplum", "Teradata", "Oracle", "SQLServer", "SAPHANA", "Netezza",
                 "BigQuery", "Athena", "hiveserver2"]

    def test_and_detect(self, infer_storage_types=False):
        """Used internally by :meth:`autodetect_settings`. It is not usually required to call this method"""
        settings = self.get_settings()

        if settings.type in self.__class__.FS_TYPES:
            future_resp = self.client._perform_json("POST",
                "/projects/%s/datasets/%s/actions/testAndDetectSettings/fsLike" % (self.project_key, self.dataset_name),
                body={"detectPossibleFormats": True, "inferStorageTypes": infer_storage_types})
            return DSSFuture(self.client, future_resp.get('jobId', None), future_resp)
        elif settings.type in self.__class__.SQL_TYPES:
            return self.client._perform_json("POST",
                "/projects/%s/datasets/%s/actions/testAndDetectSettings/externalSQL" % (self.project_key, self.dataset_name))
        else:
            raise ValueError("don't know how to test/detect on dataset type: %s" % settings.type)

    def autodetect_settings(self, infer_storage_types=False):
        """
        Detects an appropriate format and schema for this dataset, and returns the resulting settings.
        You must call :meth:`~DSSDatasetSettings.save` on the returned settings to apply them.

        :rtype: :class:`DSSDatasetSettings`
        """
        settings = self.get_settings()

        if settings.type in self.__class__.FS_TYPES:
            future = self.test_and_detect(infer_storage_types)
            result = future.wait_for_result()

            if "format" not in result or not result["format"]["ok"]:
                raise DataikuException("Format detection failed, complete response is " + json.dumps(result))

            settings.get_raw()["formatType"] = result["format"]["type"]
            settings.get_raw()["formatParams"] = result["format"]["params"]
            settings.get_raw()["schema"] = result["format"]["schemaDetection"]["newSchema"]
            return settings
        elif settings.type in self.__class__.SQL_TYPES:
            result = self.test_and_detect()

            if "schemaDetection" not in result:
                raise DataikuException("Schema detection failed, complete response is " + json.dumps(result))

            settings.get_raw()["schema"] = result["schemaDetection"]["newSchema"]
            return settings
        else:
            raise ValueError("don't know how to test/detect on dataset type: %s" % settings.type)
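
    # Autodetect sketch for a files-based dataset: detect format and schema,
    # then persist. Passing infer_storage_types=True also guesses column types:
    #
    #   settings = dataset.autodetect_settings(infer_storage_types=True)
    #   settings.save()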

    def get_as_core_dataset(self):
        """Gets the :class:`dataiku.Dataset` object corresponding to this dataset"""
        import dataiku
        return dataiku.Dataset("%s.%s" % (self.project_key, self.dataset_name))

    ########################################################
    # Creation of recipes
    ########################################################

    def new_code_recipe(self, type, code=None, recipe_name=None):
        """
        Starts creation of a new code recipe taking this dataset as input

        :param str type: Type of the recipe ('python', 'r', 'pyspark', 'sparkr', 'sql', 'sparksql', 'hive', ...)
        :param str code: The code of the recipe
        """
        if type == "python":
            builder = recipe.PythonRecipeCreator(recipe_name, self.project)
        else:
            builder = recipe.CodeRecipeCreator(recipe_name, type, self.project)
        builder.with_input(self.dataset_name)
        if code is not None:
            builder.with_script(code)
        return builder
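
    # Recipe-creation sketch: build a Python recipe that reads this dataset and
    # writes a new managed output. Names and the connection are placeholders,
    # and this assumes the recipe builder's with_new_output_dataset/create helpers:
    #
    #   builder = dataset.new_code_recipe("python", code="# recipe body here")
    #   builder.with_new_output_dataset("mydataset_out", "filesystem_managed")
    #   recipe_handle = builder.create()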

    def new_recipe(self, type, recipe_name=None):
        """
        Starts creation of a new recipe taking this dataset as input.
        For more details, please see :meth:`dataikuapi.dss.project.DSSProject.new_recipe`

        :param str type: Type of the recipe
        """
        builder = self.project.new_recipe(type=type, name=recipe_name)
        builder.with_input(self.dataset_name)
        return builder

    def list_snapshots(self):
        """
        Lists the data snapshots of the project containing data for this dataset

        :returns: a list of :class:`DSSDatasetSnapshot`
        """
        snapshots = self.client._perform_json("GET",
            "/projects/%s/datasets/%s/snapshots" % (self.project_key, self.dataset_name))
        return [DSSDatasetSnapshot(self, snapshot) for snapshot in snapshots]

    def restore_snapshot(self, snapshot_id, target_name, target_settings):
        """
        Restores data from a snapshot of this dataset to a new dataset. Returns a
        :class:`dataikuapi.dss.future.DSSFuture` to wait for the restore task to complete
        """
        future_response = self.client._perform_json("POST",
            "/projects/%s/datasets/%s/snapshots/%s/actions/restoreToNew" % (self.project_key, self.dataset_name, snapshot_id),
            body={"creationSettings": target_settings})
        return DSSFuture(self.client, future_response.get('jobId', None), future_response)


class DSSDatasetSnapshot(object):
    """
    A reference to a data snapshot of a project containing data for a particular dataset.
    Do not instantiate this class, use :meth:`DSSDataset.list_snapshots`
    """
    def __init__(self, dataset, snapshot):
        self.dataset = dataset
        self.snapshot = snapshot

    @property
    def id(self):
        """Snapshot id"""
        return self.snapshot["bundleId"]

    @property
    def type(self):
        """
        Type of this data snapshot. Either DATA_SNAPSHOT for a pure data snapshot or BUNDLE for a
        full project bundle
        """
        return self.snapshot["exportManifest"]["exportType"]

    @property
    def git_commit_info(self):
        """
        Details about the last commit of the project prior to the snapshot, if available.

        Returns a dict, which may be empty if the information is not available
        """
        return self.snapshot["exportManifest"].get("gitCommitInfo", {})

    def restore_to_new_managed_dataset(self, name, connection, type=None, format=None):
        """
        Restores data from this snapshot to a new managed dataset.
        Returns a future to wait for the restore task to complete

        :param str name: name of the dataset to create
        :param str connection: name of the connection to create the dataset on
        :param str type: type of dataset, for connections where the type could be ambiguous. Typically,
            this is SCP or SFTP, for SSH connections
        :param str format: name of a format preset relevant for the dataset type. Possible values are: CSV_ESCAPING_NOGZIP_FORHIVE,
            CSV_UNIX_GZIP, CSV_EXCEL_GZIP, CSV_EXCEL_GZIP_BIGQUERY, CSV_NOQUOTING_NOGZIP_FORPIG, PARQUET_HIVE,
            AVRO, ORC. If None, uses the default
        :rtype: :class:`dataikuapi.dss.future.DSSFuture`
        """
        ch = self.dataset.project.new_managed_dataset_creation_helper(name)
        ch.with_store_into(connection, type_option_id=type, format_option_id=format)
        future_response = self.dataset.client._perform_json("POST",
            "/projects/%s/datasets/%s/snapshots/%s/actions/restoreToNew" % (self.dataset.project_key, self.dataset.dataset_name, self.id),
            body={"creationSettings": ch.creation_settings})
        return DSSFuture(self.dataset.client, future_response.get('jobId', None), future_response,
            lambda x: DSSDataset(self.dataset.client, self.dataset.project_key, name))
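
# Snapshot-restore sketch: pick the latest snapshot and restore it into a new
# managed dataset. The connection name "filesystem_managed" and dataset name
# are placeholders:
#
#   snapshots = dataset.list_snapshots()
#   future = snapshots[-1].restore_to_new_managed_dataset("mydataset_restored",
#                                                         "filesystem_managed")
#   restored = future.wait_for_result()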


class DSSDatasetSettings(object):
    """
    Base settings class for a DSS dataset.
    Do not instantiate this class directly, use :meth:`DSSDataset.get_settings`
    """
    def __init__(self, dataset, settings):
        self.dataset = dataset
        self.settings = settings

    def get_raw(self):
        """Get the raw dataset settings as a dict"""
        return self.settings

    def get_raw_params(self):
        """Get the type-specific params, as a raw dict"""
        return self.settings["params"]

    @property
    def type(self):
        """Type of the dataset"""
        return self.settings["type"]

    def remove_partitioning(self):
        """Removes all partitioning dimensions from the dataset"""
        self.settings["partitioning"] = {"dimensions": []}

    def add_discrete_partitioning_dimension(self, dim_name):
        """Adds a discrete (value-based) partitioning dimension"""
        self.settings["partitioning"]["dimensions"].append({"name": dim_name, "type": "value"})

    def add_time_partitioning_dimension(self, dim_name, period="DAY"):
        """Adds a time partitioning dimension with the given period (e.g. DAY, MONTH)"""
        self.settings["partitioning"]["dimensions"].append({"name": dim_name, "type": "time", "params": {"period": period}})

    def add_raw_schema_column(self, column):
        """Appends a column (as a dict with at least name and type) to the schema"""
        self.settings["schema"]["columns"].append(column)

    def save(self):
        """Saves the settings back to the dataset"""
        self.dataset.client._perform_empty(
            "PUT", "/projects/%s/datasets/%s" % (self.dataset.project_key, self.dataset.dataset_name),
            body=self.settings)


class FSLikeDatasetSettings(DSSDatasetSettings):
    """
    Settings for a files-based dataset.
    Do not instantiate this class directly, use :meth:`DSSDataset.get_settings`
    """
    def __init__(self, dataset, settings):
        super(FSLikeDatasetSettings, self).__init__(dataset, settings)

    def set_connection_and_path(self, connection, path):
        self.settings["params"]["connection"] = connection
        self.settings["params"]["path"] = path

    def get_raw_format_params(self):
        """Get the raw format parameters as a dict"""
        return self.settings["formatParams"]

    def set_format(self, format_type, format_params=None):
        if format_params is None:
            format_params = {}
        self.settings["formatType"] = format_type
        self.settings["formatParams"] = format_params

    def set_csv_format(self, separator=",", style="excel", skip_rows_before=0, header_row=True, skip_rows_after=0):
        format_params = {
            "style": style,
            "separator": separator,
            "skipRowsBeforeHeader": skip_rows_before,
            "parseHeaderRow": header_row,
            "skipRowsAfterHeader": skip_rows_after
        }
        self.set_format("csv", format_params)

    def set_partitioning_file_pattern(self, pattern):
        self.settings["partitioning"]["filePathPattern"] = pattern


class SQLDatasetSettings(DSSDatasetSettings):
    """
    Settings for a SQL dataset.
    Do not instantiate this class directly, use :meth:`DSSDataset.get_settings`
    """
    def __init__(self, dataset, settings):
        super(SQLDatasetSettings, self).__init__(dataset, settings)

    def set_table(self, connection, schema, table):
        """Sets this SQL dataset in 'table' mode, targeting a particular table of a connection"""
        self.settings["params"].update({
            "connection": connection,
            "mode": "table",
            "schema": schema,
            "table": table
        })


class DSSManagedDatasetCreationHelper(object):
    """Helper to create a new managed dataset.
    Do not instantiate this class directly, use :meth:`dataikuapi.dss.project.DSSProject.new_managed_dataset_creation_helper`
    """
    def __init__(self, project, dataset_name):
        self.project = project
        self.dataset_name = dataset_name
        self.creation_settings = {"specificSettings": {}}

    def get_creation_settings(self):
        """Get the current creation settings as a raw dict"""
        return self.creation_settings

    def with_store_into(self, connection, type_option_id=None, format_option_id=None):
        """
        Sets the connection into which to store the new managed dataset

        :param str connection: Name of the connection to store into
        :param str type_option_id: If the connection accepts several types of datasets, the type
        :param str format_option_id: Optional identifier of a file format option
        :return: self
        """
        self.creation_settings["connectionId"] = connection
        if type_option_id is not None:
            self.creation_settings["typeOptionId"] = type_option_id
        if format_option_id is not None:
            self.creation_settings["specificSettings"]["formatOptionId"] = format_option_id
        return self

    def with_copy_partitioning_from(self, dataset_ref, object_type='DATASET'):
        """
        Sets the new managed dataset to use the same partitioning as an existing dataset

        :param str dataset_ref: Name of the dataset to copy partitioning from
        :return: self
        """
        code = 'dataset' if object_type == 'DATASET' else 'folder'
        self.creation_settings["partitioningOptionId"] = "copy:%s:%s" % (code, dataset_ref)
        return self

    def create(self, overwrite=False):
        """
        Executes the creation of the managed dataset according to the selected options

        :param overwrite: If the dataset being created already exists, delete it first (removing data)
        :return: The :class:`DSSDataset` corresponding to the newly created dataset
        """
        if overwrite and self.already_exists():
            self.project.get_dataset(self.dataset_name).delete(drop_data=True)

        self.project.client._perform_json("POST", "/projects/%s/datasets/managed" % self.project.project_key,
            body={
                "name": self.dataset_name,
                "creationSettings": self.creation_settings
            })
        return DSSDataset(self.project.client, self.project.project_key, self.dataset_name)

    def already_exists(self):
        """Returns whether the dataset this helper would create already exists"""
        dataset = self.project.get_dataset(self.dataset_name)
        try:
            dataset.get_metadata()
            return True
        except Exception:
            return False
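
# End-to-end creation sketch: build a managed dataset on a connection, then fill
# it from an existing dataset. Names and the connection are placeholders:
#
#   helper = project.new_managed_dataset_creation_helper("orders_copy")
#   helper.with_store_into("filesystem_managed")
#   new_dataset = helper.create(overwrite=True)
#   project.get_dataset("orders_raw").copy_to(new_dataset).wait_for_result()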