Simply returns the points in time that fall on whole multiples of the minutes interval.
def finite_datetimes(self, finite_start, finite_stop):
"""
    Simply returns the points in time that fall on whole multiples of the minutes interval.
"""
# Validate that the minutes_interval can div... |
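As an illustration of the idea only (a minimal sketch, not Luigi's implementation; the helper name and the round-up alignment rule are assumptions):

from datetime import datetime, timedelta

def minute_interval_datetimes(start, stop, minutes_interval):
    # Hypothetical helper: yield every datetime in [start, stop) that falls on
    # a whole multiple of minutes_interval within the hour.
    assert 60 % minutes_interval == 0, "minutes_interval must evenly divide 60"
    current = start.replace(second=0, microsecond=0)
    remainder = current.minute % minutes_interval
    if remainder or current < start:
        # Round up to the next aligned minute.
        current += timedelta(minutes=minutes_interval - remainder)
    while current < stop:
        yield current
        current += timedelta(minutes=minutes_interval)

# e.g. list(minute_interval_datetimes(datetime(2000, 1, 1, 0, 7), datetime(2000, 1, 1, 0, 40), 15))
# -> [datetime(2000, 1, 1, 0, 15), datetime(2000, 1, 1, 0, 30)]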
Simply returns the points in time that correspond to the turn of the month.
def finite_datetimes(self, finite_start, finite_stop):
"""
    Simply returns the points in time that correspond to the turn of the month.
"""
start_date = self._align(finite_start)
aligned_stop = self._align(finite_stop)
... |
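A minimal stand-alone sketch of the same idea, with assumed helper names (this is not the actual `_align` logic):

from datetime import datetime

def next_month(dt):
    # Roll forward to the first day of the following month.
    if dt.month == 12:
        return datetime(dt.year + 1, 1, 1)
    return datetime(dt.year, dt.month + 1, 1)

def month_boundaries(start, stop):
    # Hypothetical helper: yield every turn-of-month datetime in [start, stop).
    current = datetime(start.year, start.month, 1)
    if current < start:
        current = next_month(current)
    while current < stop:
        yield current
        current = next_month(current)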
Mark this update as complete.
IMPORTANT: if the marker table doesn't exist,
the connection transaction will be aborted and the connection reset.
Then the marker table will be created.
def touch(self, connection=None):
"""
Mark this update as complete.
    IMPORTANT: if the... |
Create a SQL Server connection and return a connection object
def connect(self):
"""
Create a SQL Server connection and return a connection object
"""
connection = _mssql.connect(user=self.user,
password=self.password,
... |
Create marker table if it doesn't exist.
Use a separate connection since the transaction might have to be reset.
def create_marker_table(self):
"""
Create marker table if it doesn't exist.
Use a separate connection since the transaction might have to be reset.
"""
connec... |
Retrieve an opener for the given protocol
:param name: name of the opener to open
:type name: string
:raises NoOpenerError: if no opener has been registered of that name
def get_opener(self, name):
"""Retrieve an opener for the given protocol
:param name: name of the opener to... |
Adds an opener to the registry
:param opener: Opener object
:type opener: Opener inherited object
def add(self, opener):
"""Adds an opener to the registry
:param opener: Opener object
:type opener: Opener inherited object
"""
index = len(self.openers)
... |
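A minimal sketch of the registry pattern these two methods describe; the `names` attribute on openers is an assumption for illustration, and these are not the actual Luigi classes (only `NoOpenerError` is named in the docstring above):

class NoOpenerError(Exception):
    pass

class OpenerRegistry:
    # Illustrative registry: openers are stored in a list and looked up by name.
    def __init__(self):
        self.openers = []
        self.registry = {}

    def add(self, opener):
        index = len(self.openers)
        self.openers.append(opener)
        for name in opener.names:  # assumed attribute listing supported protocols
            self.registry[name] = index

    def get_opener(self, name):
        if name not in self.registry:
            raise NoOpenerError('No opener registered for %r' % name)
        return self.openers[self.registry[name]]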
Open target uri.
:param target_uri: Uri to open
:type target_uri: string
:returns: Target object
def open(self, target_uri, **kwargs):
"""Open target uri.
:param target_uri: Uri to open
:type target_uri: string
:returns: Target object
"""
tar... |
Converts the query string from a target uri, using
cls.allowed_kwargs and cls.filter_kwargs to drive the logic.
:param query: Unparsed query string
:type query: urllib.parse.unsplit(uri).query
:returns: Dictionary of parsed values, everything in cls.allowed_kwargs
with values se... |
Override this method to use values from the parsed uri to initialize
the expected target.
def get_target(cls, scheme, path, fragment, username,
password, hostname, port, query, **kwargs):
"""Override this method to use values from the parsed uri to initialize
the expected tar... |
:param tasks:
:param worker_scheduler_factory:
:param override_defaults:
:return: True if all tasks and their dependencies were successfully run (or already completed);
False if any error occurred. It will return a detailed response of type LuigiRunResult
instead of a boolean if de... |
Please don't use. Instead, use the `luigi` binary.
Run from cmdline using argparse.
:param use_dynamic_argparse: Deprecated and ignored
def run(*args, **kwargs):
"""
    Please don't use. Instead, use the `luigi` binary.
Run from cmdline using argparse.
:param use_dynamic_argparse: Deprecated and ignored
... |
Run internally, bypassing the cmdline parsing.
Useful if you have some luigi code that you want to run internally.
Example:
.. code-block:: python
luigi.build([MyTask1(), MyTask2()], local_scheduler=True)
One notable difference is that `build` defaults to not using
the identical process ... |
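For context, a self-contained usage example (the task below is made up for illustration; `luigi.build` with `local_scheduler=True` runs everything in-process):

import luigi

class HelloTask(luigi.Task):
    # Hypothetical example task.
    def output(self):
        return luigi.LocalTarget('hello.txt')

    def run(self):
        with self.output().open('w') as f:
            f.write('hello\n')

if __name__ == '__main__':
    luigi.build([HelloTask()], local_scheduler=True)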
Mark this update as complete.
IMPORTANT: if the marker table doesn't exist,
the connection transaction will be aborted and the connection reset.
Then the marker table will be created.
def touch(self, connection=None):
"""
Mark this update as complete.
    IMPORTANT: if the... |
Create marker table if it doesn't exist.
Using a separate connection since the transaction might have to be reset.
def create_marker_table(self):
"""
Create marker table if it doesn't exist.
Using a separate connection since the transaction might have to be reset.
"""
... |
Inserts data generated by rows() into target table.
If the target table doesn't exist, self.create_table will be called to attempt to create the table.
Normally you don't want to override this.
def run(self):
"""
Inserts data generated by rows() into target table.
If the targ... |
Coerce input arguments to use temporary files when used for output.
Return a list of temporary file pairs (tmpfile, destination path) and
a list of arguments.
Converts each HdfsTarget to a string for the path.
def fix_paths(job):
"""
Coerce input arguments to use temporary files when used for out... |
Get name of first active job queue
def get_active_queue(self):
"""Get name of first active job queue"""
# Get dict of active queues keyed by name
queues = {q['jobQueueName']: q for q in self._client.describe_job_queues()['jobQueues']
if q['state'] == 'ENABLED' and q['status']... |
Retrieve the first job ID matching the given name
def get_job_id_from_name(self, job_name):
"""Retrieve the first job ID matching the given name"""
jobs = self._client.list_jobs(jobQueue=self._queue, jobStatus='RUNNING')['jobSummaryList']
matching_jobs = [job for job in jobs if job['jobName'] =... |
Retrieve task statuses from ECS API
:param job_id (str): AWS Batch job uuid
Returns one of {SUBMITTED|PENDING|RUNNABLE|STARTING|RUNNING|SUCCEEDED|FAILED}
def get_job_status(self, job_id):
"""Retrieve task statuses from ECS API
:param job_id (str): AWS Batch job uuid
Returns ... |
Retrieve log stream from CloudWatch
def get_logs(self, log_stream_name, get_last=50):
"""Retrieve log stream from CloudWatch"""
response = self._log_client.get_log_events(
logGroupName='/aws/batch/job',
logStreamName=log_stream_name,
startFromHead=False)
even... |
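A rough usage sketch with boto3 (the log stream name is hypothetical; the log group `/aws/batch/job` is the one used in the snippet above):

import boto3

logs = boto3.client('logs')
response = logs.get_log_events(
    logGroupName='/aws/batch/job',
    logStreamName='my-job-definition/default/0123456789abcdef',  # hypothetical
    startFromHead=False)
for event in response['events']:
    print(event['timestamp'], event['message'])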
Wrap submit_job with useful defaults
def submit_job(self, job_definition, parameters, job_name=None, queue=None):
"""Wrap submit_job with useful defaults"""
if job_name is None:
job_name = _random_id()
response = self._client.submit_job(
jobName=job_name,
job... |
Poll task status until STOPPED
def wait_on_job(self, job_id):
"""Poll task status until STOPPED"""
while True:
status = self.get_job_status(job_id)
if status == 'SUCCEEDED':
logger.info('Batch job {} SUCCEEDED'.format(job_id))
return True
... |
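The same polling idea as a stand-alone sketch using the boto3 Batch client directly (the function name and poll interval are assumptions, not the class method above):

import time
import boto3

def wait_for_batch_job(job_id, poll_seconds=10):
    batch = boto3.client('batch')
    while True:
        jobs = batch.describe_jobs(jobs=[job_id])['jobs']
        status = jobs[0]['status'] if jobs else 'UNKNOWN'
        if status == 'SUCCEEDED':
            return True
        if status == 'FAILED':
            raise RuntimeError('Batch job %s FAILED' % job_id)
        time.sleep(poll_seconds)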
Register a job definition with AWS Batch, using a JSON file.
def register_job_definition(self, json_fpath):
"""Register a job definition with AWS Batch, using a JSON"""
with open(json_fpath) as f:
job_def = json.load(f)
response = self._client.register_job_definition(**job_def)
st... |
Run the work() method from the class instance in the file "job-instance.pickle".
def main(args=sys.argv):
"""Run the work() method from the class instance in the file "job-instance.pickle".
"""
try:
tarball = "--no-tarball" not in args
# Set up logging.
logging.basicConfig(level=log... |
Arbitrarily picks an object in input and reads the Avro schema from it.
def _get_input_schema(self):
"""Arbitrarily picks an object in input and reads the Avro schema from it."""
assert avro, 'avro module required'
input_target = flatten(self.input())[0]
input_fs = input_target.fs if h... |
Return a string representation of the tasks and their statuses/parameters in a dependency tree format
def print_tree(task, indent='', last=True):
'''
    Return a string representation of the tasks and their statuses/parameters in a dependency tree format
'''
    # don't bother printing out warnings about tasks with ... |
Join relative URLs to base URLs like urllib.parse.urljoin but support
arbitrary URIs (esp. 'http+unix://').
def _urljoin(base, url):
"""
Join relative URLs to base URLs like urllib.parse.urljoin but support
arbitrary URIs (esp. 'http+unix://').
"""
parsed = urlparse(base)
scheme = parsed.sc... |
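One way to finish that idea (a sketch, not necessarily the exact implementation): temporarily substitute a scheme `urljoin` understands, then restore the original one.

from urllib.parse import urljoin, urlparse

def urljoin_any_scheme(base, url):
    # Sketch: urljoin only handles well-known schemes, so swap in 'http',
    # join, and swap the original scheme (e.g. 'http+unix') back.
    parsed = urlparse(base)
    scheme = parsed.scheme
    joined = urljoin(parsed._replace(scheme='http').geturl(), url)
    return urlparse(joined)._replace(scheme=scheme).geturl()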
Returns whether the given dataset exists.
If regional location is specified for the dataset, that is also checked
to be compatible with the remote dataset, otherwise an exception is thrown.
:param dataset:
:type dataset: BQDataset
def dataset_exists(self, dataset):
"""Ret... |
Returns whether the given table exists.
:param table:
:type table: BQTable
def table_exists(self, table):
"""Returns whether the given table exists.
:param table:
:type table: BQTable
"""
if not self.dataset_exists(table.dataset):
return... |
Creates a new dataset with the default permissions.
:param dataset:
:type dataset: BQDataset
:param raise_if_exists: whether to raise an exception if the dataset already exists.
:raises luigi.target.FileAlreadyExists: if raise_if_exists=True and the dataset exists
def make_... |
Deletes a dataset (and optionally any tables in it), if it exists.
:param dataset:
:type dataset: BQDataset
:param delete_nonempty: if true, will delete any tables before deleting the dataset
def delete_dataset(self, dataset, delete_nonempty=True):
"""Deletes a dataset (and op... |
Deletes a table, if it exists.
:param table:
:type table: BQTable
def delete_table(self, table):
"""Deletes a table, if it exists.
:param table:
:type table: BQTable
"""
if not self.table_exists(table):
return
self.client.table... |
Returns the list of datasets in a given project.
:param project_id:
:type project_id: str
def list_datasets(self, project_id):
"""Returns the list of datasets in a given project.
:param project_id:
:type project_id: str
"""
request = self.client.da... |
Returns the list of tables in a given dataset.
:param dataset:
:type dataset: BQDataset
def list_tables(self, dataset):
"""Returns the list of tables in a given dataset.
:param dataset:
:type dataset: BQDataset
"""
request = self.client.tables().li... |
Returns the SQL query for a view, or None if it doesn't exist or is not a view.
:param table: The table containing the view.
:type table: BQTable
def get_view(self, table):
"""Returns the SQL query for a view, or None if it doesn't exist or is not a view.
:param table: The table conta... |
Updates the SQL query for a view.
If the output table exists, it is replaced with the supplied view query. Otherwise a new
table is created with this view.
:param table: The table to contain the view.
:type table: BQTable
:param view: The SQL query for the view.
:type v... |
Runs a BigQuery "job". See the documentation for the format of body.
.. note::
You probably don't need to use this directly. Use the tasks defined below.
:param dataset:
:type dataset: BQDataset
def run_job(self, project_id, body, dataset=None):
"""Runs a BigQu... |
Copies (or appends) a table to another table.
:param source_table:
:type source_table: BQTable
:param dest_table:
:type dest_table: BQTable
:param create_disposition: whether to create the table if needed
:type create_disposition: CreateDispositio... |
A constructor that takes a :py:class:`BQTable`.
:param table:
:type table: BQTable
def from_bqtable(cls, table, client=None):
"""A constructor that takes a :py:class:`BQTable`.
:param table:
:type table: BQTable
"""
return cls(table.project_id, tabl... |
The fully-qualified URIs that point to your data in Google Cloud Storage.
Each URI can contain one '*' wildcard character and it must come after the 'bucket' name.
def source_uris(self):
"""The fully-qualified URIs that point to your data in Google Cloud Storage.
Each URI can contain one '*' ... |
The fully-qualified URIs that point to your data in Google Cloud
Storage. Each URI can contain one '*' wildcard character and it must
come after the 'bucket' name.
Wildcarded destinationUris in GCSQueryTarget might not be resolved
correctly and result in incomplete data. If a GCSQueryTa... |
Remote Popen.
def Popen(self, cmd, **kwargs):
"""
Remote Popen.
"""
prefixed_cmd = self._prepare_cmd(cmd)
return subprocess.Popen(prefixed_cmd, **kwargs) |
Execute a shell command remotely and return the output.
Simplified version of Popen when you only want the output as a string and detect any errors.
def check_output(self, cmd):
"""
Execute a shell command remotely and return the output.
Simplified version of Popen when you only want ... |
Open a tunnel between localhost:local_port and remote_host:remote_port via the host specified by this context.
Remember to close() the returned "tunnel" object in order to clean up
after yourself when you are done with the tunnel.
def tunnel(self, local_port, remote_port=None, remote_host="localhost")... |
Return `True` if directory at `path` exists, False otherwise.
def isdir(self, path):
"""
    Return `True` if directory at `path` exists, False otherwise.
"""
try:
self.remote_context.check_output(["test", "-d", path])
except subprocess.CalledProcessError as e:
... |
Remove file or directory at location `path`.
def remove(self, path, recursive=True):
"""
Remove file or directory at location `path`.
"""
if recursive:
cmd = ["rm", "-r", path]
else:
cmd = ["rm", path]
self.remote_context.check_output(cmd) |
Returns command of process.
:param pid:
def getpcmd(pid):
"""
Returns command of process.
:param pid:
"""
if os.name == "nt":
# Use wmic command instead of ps on Windows.
cmd = 'wmic path win32_process where ProcessID=%s get Commandline 2> nul' % (pid, )
with os.popen(... |
Makes sure the process is only run once at the same time with the same name.
Notice that since we check the process name, different parameters to the same
command can spawn multiple processes at the same time, i.e. running
"/usr/bin/my_process" does not prevent anyone from launching
"/usr/bin/my_pro... |
Add a failure event with the current timestamp.
def add_failure(self):
"""
Add a failure event with the current timestamp.
"""
failure_time = time.time()
if not self.first_failure_time:
self.first_failure_time = failure_time
self.failures.append(failure_tim... |
Return the number of failures in the window.
def num_failures(self):
"""
Return the number of failures in the window.
"""
min_time = time.time() - self.window
while self.failures and self.failures[0] < min_time:
self.failures.popleft()
return len(self.failu... |
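Both methods together implement a sliding-window counter; a self-contained illustration of the same pattern (the class name is chosen for the example):

import collections
import time

class FailureWindow:
    def __init__(self, window_seconds):
        self.window = window_seconds
        self.failures = collections.deque()
        self.first_failure_time = None

    def add_failure(self):
        failure_time = time.time()
        if not self.first_failure_time:
            self.first_failure_time = failure_time
        self.failures.append(failure_time)

    def num_failures(self):
        # Drop events that have slid out of the window, then count the rest.
        min_time = time.time() - self.window
        while self.failures and self.failures[0] < min_time:
            self.failures.popleft()
        return len(self.failures)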
A worker is trivial if it is not an assistant and has only tasks that are
without requirements.
We have to pass the state parameter for optimization reasons.
def is_trivial_worker(self, state):
"""
    A worker is trivial if it is not an assistant and has only tasks that are
    without requirements.
We have to pass the... |
Return how many tasks are PENDING + RUNNING. O(1).
def num_pending_tasks(self):
"""
Return how many tasks are PENDING + RUNNING. O(1).
"""
return len(self._status_tasks[PENDING]) + len(self._status_tasks[RUNNING]) |
Update priority of the given task.
Priority can only be increased.
If the task doesn't exist, a placeholder task is created to preserve priority when the task is later scheduled.
def _update_priority(self, task, prio, worker):
"""
Update priority of the given task.
Priority ca... |
* add task identified by task_id if it doesn't exist
* if deps is not None, update dependency list
* update status of task
* add additional workers/stakeholders
* update priority when needed
def add_task(self, task_id=None, status=PENDING, runnable=True,
deps=None, new_... |
Returns the dependency graph rooted at task_id
This does a breadth-first traversal to find the nodes closest to the
root before hitting the scheduler.max_graph_nodes limit.
:param root_task_id: the id of the graph's root
:return: A map of task id to serialized node
def _traverse_graph... |
Query for a subset of tasks by status.
def task_list(self, status='', upstream_status='', limit=True, search=None, max_shown_tasks=None,
**kwargs):
"""
Query for a subset of tasks by status.
"""
if not search:
count_limit = max_shown_tasks or self._config.m... |
Resource usage info and the consumers (tasks) of each resource.
def resource_list(self):
"""
    Resource usage info and the consumers (tasks) of each resource.
"""
self.prune()
resources = [
dict(
name=resource,
num_total=r_dict['total'],
num_used=r_... |
get total resources and available ones
def resources(self):
''' get total resources and available ones '''
used_resources = self._used_resources()
ret = collections.defaultdict(dict)
for resource, total in six.iteritems(self._resources):
ret[resource]['total'] = total
... |
Query for a subset of tasks by task_id.
:param task_str:
:return:
def task_search(self, task_str, **kwargs):
"""
Query for a subset of tasks by task_id.
:param task_str:
:return:
"""
self.prune()
result = collections.defaultdict(dict)
fo... |
Returns ``True`` if the path for this FileSystemTarget exists; ``False`` otherwise.
This method is implemented by using :py:attr:`fs`.
def exists(self):
"""
Returns ``True`` if the path for this FileSystemTarget exists; ``False`` otherwise.
This method is implemented by using :py:attr... |
A context manager that enables a reasonably short, general and
magic-less way to solve the :ref:`AtomicWrites`.
* On *entering*, it will create the parent directories so the
temporary_path is writeable right away.
This step uses :py:meth:`FileSystem.mkdir`.
* On *exiting... |
Generate an id for the indicator document.
def marker_index_document_id(self):
"""
Generate an id for the indicator document.
"""
params = '%s:%s:%s' % (self.index, self.doc_type, self.update_id)
return hashlib.sha1(params.encode('utf-8')).hexdigest() |
Mark this update as complete.
The document id would be sufficient but,
for documentation,
we index the parameters `update_id`, `target_index`, `target_doc_type` and `date` as well.
def touch(self):
"""
Mark this update as complete.
    The document id would be sufficient but... |
Test if this task has been run.
def exists(self):
"""
    Test if this task has been run.
"""
try:
self.es.get(index=self.marker_index, doc_type=self.marker_doc_type, id=self.marker_index_document_id())
return True
except elasticsearch.NotFoundError:
... |
Create the index that will keep track of the tasks if necessary.
def create_marker_index(self):
"""
Create the index that will keep track of the tasks if necessary.
"""
if not self.es.indices.exists(index=self.marker_index):
self.es.indices.create(index=self.marker_index) |
Shrink the history of updates for
an `index/doc_type` combination down to `self.marker_index_hist_size`.
def ensure_hist_size(self):
"""
    Shrink the history of updates for
    an `index/doc_type` combination down to `self.marker_index_hist_size`.
"""
if self.marker_index_hist_s... |
Since `self.docs` may yield documents that do not explicitly contain `_index` or `_type`,
add those attributes here, if necessary.
def _docs(self):
"""
Since `self.docs` may yield documents that do not explicitly contain `_index` or `_type`,
add those attributes here, if necessary.
... |
Override to provide code for creating the target index.
By default it will be created without any special settings or mappings.
def create_index(self):
"""
Override to provide code for creating the target index.
By default it will be created without any special settings or mappings.
... |
Delete the index, if it exists.
def delete_index(self):
"""
Delete the index, if it exists.
"""
es = self._init_connection()
if es.indices.exists(index=self.index):
es.indices.delete(index=self.index) |
Returns a ElasticsearchTarget representing the inserted dataset.
Normally you don't override this.
def output(self):
"""
Returns a ElasticsearchTarget representing the inserted dataset.
Normally you don't override this.
"""
return ElasticsearchTarget(
host=... |
Run task, namely:
* purge existing index, if requested (`purge_existing_index`),
* create the index, if missing,
* apply mappings, if given,
* set refresh interval to -1 (disable) for performance reasons,
* bulk index in batches of size `chunk_size` (2000),
* set refresh... |
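A rough sketch of that sequence with the elasticsearch-py client (the index name, sample documents and refresh values are assumptions, not the task's actual run() body):

from elasticsearch import Elasticsearch, helpers

es = Elasticsearch()
index = 'my-index'
docs = [{'id': 1, 'text': 'example document'}]

if not es.indices.exists(index=index):
    es.indices.create(index=index)
# Disable refresh while bulk indexing for performance, then restore it.
es.indices.put_settings(index=index, body={'index': {'refresh_interval': '-1'}})
try:
    helpers.bulk(es, ({'_index': index, '_source': doc} for doc in docs), chunk_size=2000)
finally:
    es.indices.put_settings(index=index, body={'index': {'refresh_interval': '1s'}})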
Gets the value of the section/option using method.
Returns default if value is not found.
Raises an exception if the default value is not None and doesn't match the expected_type.
def _get_with_default(self, method, section, option, default, expected_type=None, **kwargs):
"""
Gets the... |
Poll job status while active
def __track_job(self):
"""Poll job status while active"""
while not self.__verify_job_has_started():
time.sleep(self.__POLL_TIME)
self.__logger.debug("Waiting for Kubernetes job " + self.uu_name + " to start")
self.__print_kubectl_hints()
... |
Asserts that the job has successfully started
def __verify_job_has_started(self):
"""Asserts that the job has successfully started"""
# Verify that the job started
self.__get_job()
# Verify that the pod started
pods = self.__get_pods()
assert len(pods) > 0, "No pod sch... |
Return the Kubernetes job status
def __get_job_status(self):
"""Return the Kubernetes job status"""
# Figure out status and return it
job = self.__get_job()
if "succeeded" in job.obj["status"] and job.obj["status"]["succeeded"] > 0:
job.scale(replicas=0)
if self... |
Return an engine instance, creating it if it doesn't exist.
Recreate the engine connection if it wasn't originally created
by the current process.
def engine(self):
"""
Return an engine instance, creating it if it doesn't exist.
Recreate the engine connection if it wasn't orig... |
Mark this update as complete.
def touch(self):
"""
Mark this update as complete.
"""
if self.marker_table_bound is None:
self.create_marker_table()
table = self.marker_table_bound
id_exists = self.exists()
with self.engine.begin() as conn:
... |
Create marker table if it doesn't exist.
Using a separate connection since the transaction might have to be reset.
def create_marker_table(self):
"""
Create marker table if it doesn't exist.
Using a separate connection since the transaction might have to be reset.
"""
... |
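A sketch of that idea with SQLAlchemy (the engine URL and column names are assumptions): build the marker table against a fresh engine so an aborted transaction elsewhere cannot interfere, and let `create_all` be a no-op when the table already exists.

import datetime
import sqlalchemy as sa

engine = sa.create_engine('sqlite:///markers.db')  # hypothetical connection string
metadata = sa.MetaData()
marker_table = sa.Table(
    'table_updates', metadata,
    sa.Column('update_id', sa.String(128), primary_key=True),
    sa.Column('target_table', sa.String(128)),
    sa.Column('inserted', sa.DateTime, default=datetime.datetime.now))
metadata.create_all(engine)  # only creates tables that don't exist yet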
Override to provide code for creating the target table.
By default it will be created using types specified in columns.
If the table exists, then it binds to the existing table.
If overridden, use the provided connection object for setting up the table in order to
create the table and ... |
This method does the actual insertion of the rows of data given by ins_rows into the
database. A task that needs row updates instead of insertions should overload this method.
:param conn: The sqlalchemy connection object
:param ins_rows: The dictionary of rows with the keys in the format _<colu... |
Run the work() method from the class instance in the file "job-instance.pickle".
def main(args=sys.argv):
"""Run the work() method from the class instance in the file "job-instance.pickle".
"""
try:
# Set up logging.
logging.basicConfig(level=logging.WARN)
work_dir = args[1]
... |
Does not change self.path.
Unlike ``move_dir()``, ``rename()`` might cause nested directories.
See spotify/luigi#522
def rename(self, path, raise_if_exists=False):
"""
Does not change self.path.
Unlike ``move_dir()``, ``rename()`` might cause nested directories.
See sp... |
Alias for ``rename()``
def move(self, path, raise_if_exists=False):
"""
Alias for ``rename()``
"""
self.rename(path, raise_if_exists=raise_if_exists) |
Currently only works with hadoopcli
def is_writable(self):
"""
Currently only works with hadoopcli
"""
if "/" in self.path:
# example path: /log/ap/2013-01-17/00
parts = self.path.split("/")
# start with the full path and then up the tree until we can... |
Takes a worker and sorts out tasks based on their status.
Still_pending_not_ext is only used to get upstream_failure, upstream_missing_dependency and run_by_other_worker
def _partition_tasks(worker):
"""
Takes a worker and sorts out tasks based on their status.
Still_pending_not_ext is only used to get... |
Add the "upstream_*" and "not_run" statuses my mutating set_tasks.
def _populate_unknown_statuses(set_tasks):
"""
Add the "upstream_*" and "not_run" statuses my mutating set_tasks.
"""
visited = set()
for task in set_tasks["still_pending_not_ext"]:
_depth_first_search(set_tasks, task, visit... |
This DFS checks why tasks are still pending.
def _depth_first_search(set_tasks, current_task, visited):
"""
    This DFS checks why tasks are still pending.
"""
visited.add(current_task)
if current_task in set_tasks["still_pending_not_ext"]:
upstream_failure = False
upstream_missing_dep... |
This returns a string for each status
def _get_str(task_dict, extra_indent):
"""
This returns a string for each status
"""
summary_length = execution_summary().summary_length
lines = []
task_names = sorted(task_dict.keys())
for task_family in task_names:
tasks = task_dict[task_fami... |
Checks if there is a continuous range
def _ranging_attributes(attributes, param_class):
"""
Checks if there is a continuous range
"""
next_attributes = {param_class.next_in_enumeration(attribute) for attribute in attributes}
in_first = attributes.difference(next_attributes)
in_second = next_att... |
Get the human readable comments and quantities for the task types.
def _get_comments(group_tasks):
"""
Get the human readable comments and quantities for the task types.
"""
comments = {}
for status, human in _COMMENTS:
num_tasks = _get_number_of_tasks_for(status, group_tasks)
if nu... |
This returns a set of the tasks that are being run by other workers
def _get_run_by_other_worker(worker):
"""
    This returns a set of the tasks that are being run by other workers
"""
task_sets = _get_external_workers(worker).values()
return functools.reduce(lambda a, b: a | b, task_sets, set()) |
This returns a dict with a set of tasks for all of the other workers
def _get_external_workers(worker):
"""
This returns a dict with a set of tasks for all of the other workers
"""
worker_that_blocked_task = collections.defaultdict(set)
get_work_response_history = worker._get_work_response_history
... |
Takes a dictionary with sets of tasks grouped by their status and
returns a nested dictionary with arrays of tasks grouped by
their status and task name
def _group_tasks_by_name_and_status(task_dict):
"""
Takes a dictionary with sets of tasks grouped by their status and
returns a dicti... |
Given a grouped set of tasks, returns a LuigiStatusCode
def _tasks_status(set_tasks):
"""
Given a grouped set of tasks, returns a LuigiStatusCode
"""
if set_tasks["ever_failed"]:
if not set_tasks["failed"]:
return LuigiStatusCode.SUCCESS_WITH_RETRY
else:
if set_t... |
Returns a canonical string used to identify a particular task
:param task_family: The task family (class name) of the task
:param params: a dict mapping parameter names to their serialized values
:return: A unique, shortened identifier corresponding to the family and params
def task_id_str(task_family, pa... |
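A small usage illustration (the family name and parameter values are made up; parameter values are expected to be already-serialized strings):

from luigi.task import task_id_str

task_id = task_id_str('MyTask', {'date': '2024-01-01', 'rate': '0.5'})
# Produces something like 'MyTask_2024_01_01_0_5_<hash>': the family, a shortened
# parameter summary, and a hash of all parameters.
print(task_id)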
Returns an externalized version of a Task. You may both pass an
instantiated task object or a task class. Some examples:
.. code-block:: python
class RequiringTask(luigi.Task):
def requires(self):
task_object = self.clone(MyTask)
return externalize(task_obje... |
Maps all Tasks in a structured data object to their .output().
def getpaths(struct):
"""
Maps all Tasks in a structured data object to their .output().
"""
if isinstance(struct, Task):
return struct.output()
elif isinstance(struct, dict):
return struct.__class__((k, getpaths(v)) for... |
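A brief usage illustration with hypothetical tasks; `getpaths` preserves the shape of the container and replaces each Task with its `output()`:

import luigi
from luigi.task import getpaths

class FetchTask(luigi.Task):
    # Hypothetical task for illustration.
    def output(self):
        return luigi.LocalTarget('fetch.txt')

class ParseTask(luigi.Task):
    def output(self):
        return luigi.LocalTarget('parse.txt')

print(getpaths({'raw': FetchTask(), 'parsed': [ParseTask()]}))
# -> {'raw': <LocalTarget>, 'parsed': [<LocalTarget>]}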
Creates a flat list of all items in structured output (dicts, lists, items):
.. code-block:: python
>>> sorted(flatten({'a': 'foo', 'b': 'bar'}))
['bar', 'foo']
>>> sorted(flatten(['foo', ['bar', 'troll']]))
['bar', 'foo', 'troll']
>>> flatten('foo')
['foo']
... |
Lists all output targets by recursively walking output-less (wrapper) tasks.
FIXME order consistently.
def flatten_output(task):
"""
Lists all output targets by recursively walking output-less (wrapper) tasks.
FIXME order consistently.
"""
r = flatten(task.output())
if not r:
for ... |