The libs dir for a given arch.
def get_libs_dir(self, arch):
'''The libs dir for a given arch.'''
ensure_dir(join(self.libs_dir, arch))
return join(self.libs_dir, arch) |
Make a limited-length string in the form:
"the string is very lo...(and 15 more)"
def shorten_string(string, max_width):
''' Make a limited-length string in the form:
"the string is very lo...(and 15 more)"
'''
string_len = len(string)
if string_len <= max_width:
return string
visible = max... |
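The tail of shorten_string is cut off at the `visible = ...` computation. A minimal self-contained sketch of the same idea, reserving room for the "...(and N more)" suffix (the exact reservation arithmetic upstream may differ):

def shorten_string(string, max_width):
    '''Make a limited-length string in the form:
    "the string is very lo...(and 15 more)"
    '''
    string_len = len(string)
    if string_len <= max_width:
        return string
    # Reserve room for the "...(and N more)" suffix before slicing.
    suffix = '...(and {} more)'
    visible = max(1, max_width - len(suffix.format(string_len)))
    return string[:visible] + suffix.format(string_len - visible)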
Runs the command (which should be an sh.Command instance), while
logging the output.
def shprint(command, *args, **kwargs):
'''Runs the command (which should be an sh.Command instance), while
logging the output.'''
kwargs["_iter"] = True
kwargs["_out_bufsize"] = 1
kwargs["_err_to_out"] = True
... |
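The body is truncated after the kwargs setup. A hedged sketch of the streaming pattern those `sh` kwargs enable (the real python-for-android version adds logging, tail buffers, and filtering):

import sh

def shprint(command, *args, **kwargs):
    '''Run an sh.Command, echoing its combined output line by line.'''
    kwargs['_iter'] = True        # yield output lines as a generator
    kwargs['_out_bufsize'] = 1    # line-buffered output
    kwargs['_err_to_out'] = True  # merge stderr into stdout
    output = command(*args, **kwargs)
    for line in output:
        print(line, end='')
    return output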
add_argument(dest, ..., name=value, ...)
add_argument(option_string, option_string, ..., name=value, ...)
def add_argument(self, *args, **kwargs):
"""
add_argument(dest, ..., name=value, ...)
add_argument(option_string, option_string, ..., name=value, ...)
"""
# ... |
error(message: string)
Prints a usage message incorporating the message to stderr and
exits.
If you override this in a subclass, it should not return -- it
should either exit or raise an exception.
def error(self, message):
"""error(message: string)
Prints a ... |
Given an object, return a boolean indicating whether it is an instance
or subclass of :py:class:`Exception`.
def isexception(obj):
"""Given an object, return a boolean indicating whether it is an instance
or subclass of :py:class:`Exception`.
"""
if isinstance(obj, Exception):
return True
... |
Receives a row, converts datetimes to strings.
def _reduce_datetimes(row):
"""Receives a row, converts datetimes to strings."""
row = list(row)
for i in range(len(row)):
if hasattr(row[i], 'isoformat'):
row[i] = row[i].isoformat()
return tuple(row) |
Returns the row as a dictionary, ordered if requested.
def as_dict(self, ordered=False):
"""Returns the row as a dictionary, as ordered."""
items = zip(self.keys(), self.values())
return OrderedDict(items) if ordered else dict(items) |
A Tablib Dataset containing the row.
def dataset(self):
"""A Tablib Dataset containing the row."""
data = tablib.Dataset()
data.headers = self.keys()
row = _reduce_datetimes(self.values())
data.append(row)
return data |
A Tablib Dataset representation of the RecordCollection.
def dataset(self):
"""A Tablib Dataset representation of the RecordCollection."""
# Create a new Tablib Dataset.
data = tablib.Dataset()
# If the RecordCollection is empty, just return the empty set
# Check number of rows... |
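The empty-collection handling is cut off mid-comment. A sketch consistent with the visible pieces, reusing _reduce_datetimes from above (how the original fetches rows may differ):

import tablib

def dataset(self):
    '''A Tablib Dataset representation of the RecordCollection.'''
    data = tablib.Dataset()
    rows = self.all()
    # If the RecordCollection is empty, just return the empty Dataset.
    if not rows:
        return data
    data.headers = rows[0].keys()
    for row in rows:
        data.append(_reduce_datetimes(row.values()))
    return data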
Returns a list of all rows for the RecordCollection. If they haven't
been fetched yet, consume the iterator and cache the results.
def all(self, as_dict=False, as_ordereddict=False):
"""Returns a list of all rows for the RecordCollection. If they haven't
been fetched yet, consume the iterator a... |
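The body is truncated. A sketch matching the described contract, assuming that iterating the collection drains and caches the underlying cursor:

def all(self, as_dict=False, as_ordereddict=False):
    # list(self) consumes the iterator; __iter__ is assumed to cache rows.
    rows = list(self)
    if as_dict:
        return [r.as_dict() for r in rows]
    if as_ordereddict:
        return [r.as_dict(ordered=True) for r in rows]
    return rows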
Returns a single record for the RecordCollection, or `default`. If
`default` is an instance or subclass of Exception, then raise it
instead of returning it.
def first(self, default=None, as_dict=False, as_ordereddict=False):
"""Returns a single record for the RecordCollection, or `default`. If
... |
Returns a single record for the RecordCollection, ensuring that it
is the only record, or returns `default`. If `default` is an instance
or subclass of Exception, then raise it instead of returning it.
def one(self, default=None, as_dict=False, as_ordereddict=False):
"""Returns a single record ... |
Get a connection to this Database. Connections are retrieved from a
pool.
def get_connection(self):
"""Get a connection to this Database. Connections are retrieved from a
pool.
"""
if not self.open:
raise exc.ResourceClosedError('Database closed.')
return Co... |
Executes the given SQL query against the Database. Parameters can,
optionally, be provided. Returns a RecordCollection, which can be
iterated over to get result rows as dictionaries.
def query(self, query, fetchall=False, **params):
"""Executes the given SQL query against the Database. Paramete... |
Bulk insert or update.
def bulk_query(self, query, *multiparams):
"""Bulk insert or update."""
with self.get_connection() as conn:
conn.bulk_query(query, *multiparams) |
Like Database.query, but takes a filename to load a query from.
def query_file(self, path, fetchall=False, **params):
"""Like Database.query, but takes a filename to load a query from."""
with self.get_connection() as conn:
return conn.query_file(path, fetchall, **params) |
Like Database.bulk_query, but takes a filename to load a query from.
def bulk_query_file(self, path, *multiparams):
"""Like Database.bulk_query, but takes a filename to load a query from."""
with self.get_connection() as conn:
conn.bulk_query_file(path, *multiparams) |
A context manager for executing a transaction on this Database.
def transaction(self):
"""A context manager for executing a transaction on this Database."""
conn = self.get_connection()
tx = conn.transaction()
try:
yield conn
tx.commit()
except:
... |
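The except branch is cut off. A sketch of the full context-manager shape, assuming rollback on failure and closing the connection either way (whether the original re-raises is not visible here):

from contextlib import contextmanager

@contextmanager
def transaction(self):
    '''A context manager for executing a transaction on this Database.'''
    conn = self.get_connection()
    tx = conn.transaction()
    try:
        yield conn
        tx.commit()
    except Exception:
        tx.rollback()  # undo the partial transaction
        raise
    finally:
        conn.close()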
Executes the given SQL query against the connected Database.
Parameters can, optionally, be provided. Returns a RecordCollection,
which can be iterated over to get result rows as dictionaries.
def query(self, query, fetchall=False, **params):
"""Executes the given SQL query against the connecte... |
Bulk insert or update.
def bulk_query(self, query, *multiparams):
"""Bulk insert or update."""
self._conn.execute(text(query), *multiparams) |
Like Connection.query, but takes a filename to load a query from.
def query_file(self, path, fetchall=False, **params):
"""Like Connection.query, but takes a filename to load a query from."""
# If the path doesn't exist
if not os.path.exists(path):
raise IOError("File '{}' not found!"... |
Like Connection.bulk_query, but takes a filename to load a query
from.
def bulk_query_file(self, path, *multiparams):
"""Like Connection.bulk_query, but takes a filename to load a query
from.
"""
# If the path doesn't exist
if not os.path.exists(path):
raise I... |
Select the best match in a list or dictionary of choices.
Find best matches in a list or dictionary of choices, return a
generator of tuples containing the match and its score. If a dictionary
is used, also returns the key for each match.
Arguments:
query: An object representing the thing we w... |
Select the best match in a list or dictionary of choices.
Find best matches in a list or dictionary of choices, return a
list of tuples containing the match and its score. If a dictionary
is used, also returns the key for each match.
Arguments:
query: An object representing the thing we want t... |
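A usage example in the style of the fuzzywuzzy README (the choices and scores are illustrative):

from fuzzywuzzy import process

choices = ['Atlanta Falcons', 'New York Jets', 'New York Giants', 'Dallas Cowboys']
process.extract('new york jets', choices, limit=2)
# [('New York Jets', 100), ('New York Giants', 78)]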
Get a list of the best matches to a collection of choices.
Convenience function for getting the choices with best scores.
Args:
query: A string to match against
choices: A list or dictionary of choices, suitable for use with
extract().
processor: Optional function for trans... |
Find the single best match above a score in a list of choices.
This is a convenience method which returns the single best choice.
See extract() for the full arguments list.
Args:
query: A string to match against
choices: A list or dictionary of choices, suitable for use with
ex... |
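And the single-best-match convenience, again with an illustrative score:

from fuzzywuzzy import process

process.extractOne('cowboys', ['Atlanta Falcons', 'New York Jets', 'Dallas Cowboys'])
# ('Dallas Cowboys', 90)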
This convenience function takes a list of strings containing duplicates and uses fuzzy matching to identify
and remove duplicates. Specifically, it uses process.extract to identify duplicates that
score greater than a user-defined threshold. Then, it looks for the longest item in the duplicate list
sinc... |
If both objects aren't either both string or unicode instances, force them to unicode
def make_type_consistent(s1, s2):
"""If both objects aren't either both string or unicode instances force them to unicode"""
if isinstance(s1, str) and isinstance(s2, str):
return s1, s2
elif isinstance(s1, unicod... |
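The elif branch is cut off. A sketch of the Python 2 era pattern the names suggest; note that `unicode` does not exist on Python 3:

def make_type_consistent(s1, s2):
    '''If both objects aren't either both string or unicode instances,
    force them to unicode'''
    if isinstance(s1, str) and isinstance(s2, str):
        return s1, s2
    elif isinstance(s1, unicode) and isinstance(s2, unicode):  # Python 2 only
        return s1, s2
    else:
        return unicode(s1), unicode(s2)  # Python 2 only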
Process string by
-- removing all but letters and numbers
-- trimming whitespace
-- forcing to lower case
if force_ascii == True, force convert to ascii
def full_process(s, force_ascii=False):
"""Process string by
-- removing all but letters and numbers
-- trimming whitespace
... |
Return the ratio of the most similar substring
as a number between 0 and 100.
def partial_ratio(s1, s2):
""""Return the ratio of the most similar substring
as a number between 0 and 100."""
s1, s2 = utils.make_type_consistent(s1, s2)
if len(s1) <= len(s2):
shorter = s1
longer = s2
... |
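The matching logic is truncated. A sketch of the sliding-window idea using difflib (the real implementation adds score cutoffs and short-circuits):

from difflib import SequenceMatcher

def partial_ratio(s1, s2):
    shorter, longer = (s1, s2) if len(s1) <= len(s2) else (s2, s1)
    blocks = SequenceMatcher(None, shorter, longer).get_matching_blocks()
    scores = []
    for short_start, long_start, _ in blocks:
        # Align a shorter-length window of the longer string with each
        # matching block, then score shorter against that window.
        start = max(long_start - short_start, 0)
        window = longer[start:start + len(shorter)]
        scores.append(SequenceMatcher(None, shorter, window).ratio())
    return int(round(100 * max(scores))) if scores else 0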
Return a cleaned string with token sorted.
def _process_and_sort(s, force_ascii, full_process=True):
"""Return a cleaned string with token sorted."""
# pull tokens
ts = utils.full_process(s, force_ascii=force_ascii) if full_process else s
tokens = ts.split()
# sort tokens and join
sorted_strin... |
Return a measure of the sequences' similarity between 0 and 100
but sorting the tokens before comparing.
def token_sort_ratio(s1, s2, force_ascii=True, full_process=True):
"""Return a measure of the sequences' similarity between 0 and 100
but sorting the tokens before comparing.
"""
return _token_sor... |
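The effect of sorting tokens first, with README-style illustrative scores:

from fuzzywuzzy import fuzz

fuzz.ratio('fuzzy wuzzy was a bear', 'wuzzy fuzzy was a bear')             # ~91
fuzz.token_sort_ratio('fuzzy wuzzy was a bear', 'wuzzy fuzzy was a bear')  # 100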
Return the ratio of the most similar substring as a number between
0 and 100 but sorting the tokens before comparing.
def partial_token_sort_ratio(s1, s2, force_ascii=True, full_process=True):
"""Return the ratio of the most similar substring as a number between
0 and 100 but sorting the tokens before compar... |
Find all alphanumeric tokens in each string...
- treat them as a set
- construct two strings of the form:
<sorted_intersection><sorted_remainder>
- take ratios of those two strings
- controls for unordered partial matches
def _token_set(s1, s2, partial=True, force_ascii=True... |
Quick ratio comparison between two strings.
Runs full_process from utils on both strings
Short circuits if either of the strings is empty after processing.
:param s1:
:param s2:
:param force_ascii: Allow only ASCII characters (Default: True)
:param full_process: Process inputs, used here to avoid do... |
Unicode quick ratio
Calls QRatio with force_ascii set to False
:param s1:
:param s2:
:return: similarity ratio
def UQRatio(s1, s2, full_process=True):
"""
Unicode quick ratio
Calls QRatio with force_ascii set to False
:param s1:
:param s2:
:return: similarity ratio
"""
... |
Return a measure of the sequences' similarity between 0 and 100, using different algorithms.
**Steps in the order they occur**
#. Run full_process from utils on both strings
#. Short circuit if this makes either string empty
#. Take the ratio of the two processed strings (fuzz.ratio)
#. Run checks... |
Return a measure of the sequences' similarity between 0 and 100,
using different algorithms. Same as WRatio but preserving unicode.
def UWRatio(s1, s2, full_process=True):
"""Return a measure of the sequences' similarity between 0 and 100,
using different algorithms. Same as WRatio but preserving unicode.
... |
Convenience function to report how much time the execution of one statement took
def print_result_from_timeit(stmt='pass', setup='pass', number=1000000):
"""
Convenience function to report how much time the execution of one statement took
"""
units = ["s", "ms", "us", "ns"]
duration = timeit(stmt, setup, number=in... |
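The unit-scaling tail is cut off. A sketch that scales the per-call average into a readable unit (the original's formatting may differ):

from timeit import timeit

def print_result_from_timeit(stmt='pass', setup='pass', number=1000000):
    units = ['s', 'ms', 'us', 'ns']
    avg = timeit(stmt, setup, number=int(number)) / float(number)
    idx = 0
    # Scale up by 1000x until the value lands in a readable unit.
    while avg < 1 and idx < len(units) - 1:
        avg *= 1000
        idx += 1
    print('{} ran {:,} times; average {:.3f} {} per call'.format(
        stmt, number, avg, units[idx]))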
Deploy this ``Model`` to an ``Endpoint`` and optionally return a ``Predictor``.
Create a SageMaker ``Model`` and ``EndpointConfig``, and deploy an ``Endpoint`` from this ``Model``.
If ``self.predictor_cls`` is not None, this method returns the result of invoking
``self.predictor_cls`` on the ... |
Create a SageMaker Model Entity
Args:
instance_type (str): The EC2 instance type that this Model will be used for, this is only
used to determine if the image needs GPU support or not.
accelerator_type (str): Type of Elastic Inference accelerator to attach to an endpoint... |
Delete the SageMaker model backing this pipeline model. This does not delete the list of SageMaker models used
in multiple containers to build the inference pipeline.
def delete_model(self):
"""Delete the SageMaker model backing this pipeline model. This does not delete the list of SageMaker models use... |
Stream the output of a process to stdout
This function takes an existing process that will be polled for output. Only stdout
will be polled and sent to sys.stdout.
Args:
process(subprocess.Popen): a process that has been started with
stdout=PIPE and stderr=STDOUT
Returns (int): pr... |
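A sketch matching the described contract; raising RuntimeError on a non-zero exit code is an assumption:

import subprocess
import sys

def _stream_output(process):
    '''Relay a child's combined stdout/stderr to sys.stdout until it
    exits, then return its exit code (raising if non-zero).'''
    exit_code = None
    while exit_code is None:
        stdout = process.stdout.readline().decode('utf-8')
        sys.stdout.write(stdout)
        exit_code = process.poll()
    if exit_code != 0:
        raise RuntimeError('Process exited with code: %s' % exit_code)
    return exit_code

# Usage: start the process exactly as the docstring requires.
proc = subprocess.Popen(['echo', 'hello'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
_stream_output(proc)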
Run a training job locally using docker-compose.
Args:
input_data_config (dict): The Input Data Configuration, this contains data such as the
channels to be used for training.
hyperparameters (dict): The HyperParameters for the training job.
job_name (str): Na... |
Host a local endpoint using docker-compose.
Args:
primary_container (dict): dictionary containing the container runtime settings
for serving. Expected keys:
- 'ModelDataUrl' pointing to a file or s3:// location.
- 'Environment' a dictionary of environm... |
Stop the serving container.
The serving container runs in async mode to allow the SDK to do other tasks.
def stop_serving(self):
"""Stop the serving container.
The serving container runs in async mode to allow the SDK to do other tasks.
"""
if self.container:
self.... |
Get the model artifacts from all the container nodes.
Used after training completes to gather the data from all the individual containers. As in the
official SageMaker Training Service, it will overwrite duplicate files if multiple containers have
the same file names.
Args:
com... |
Write the config files for the training containers.
This method writes the hyperparameters, resources and input data configuration files.
Args:
host (str): Host to write the configuration for
hyperparameters (dict): Hyperparameters for training.
input_data_config (d... |
Writes a config file describing a training/hosting environment.
This method generates a docker compose configuration file with an entry for each container
that will be created (based on self.hosts). It calls
:meth:~sagemaker.local_session.SageMakerContainer._create_docker_host to generate t... |
Generate a list of :class:`~sagemaker.local_session.Volume` required for the container to start.
It takes a folder with the necessary files for training and creates a list of opt volumes that
the Container needs to start.
Args:
host (str): container for which the volumes will be ge... |
Load the gluon model. Called once when hosting service starts.
:param model_dir: The directory where model files are stored.
:return: a model (in this case a Gluon network)
def model_fn(model_dir):
"""
Load the gluon model. Called once when hosting service starts.
:param model_dir: The directory ... |
Transform a request using the Gluon model. Called once per request.
:param net: The Gluon model.
:param data: The request payload.
:param input_content_type: The request content type.
:param output_content_type: The (desired) response content type.
:return: response payload and content type.
def t... |
Set hyperparameters needed for training.
Args:
* records (:class:`~RecordSet`): The records to train this ``Estimator`` on.
* mini_batch_size (int or None): The size of each mini-batch to use when training. If ``None``, a
default value will be used.
* job_nam... |
Return the ECR URI of an image.
Args:
region (str): AWS region where the image is uploaded.
framework (str): framework used by the image.
instance_type (str): SageMaker instance type. Used to determine device type (cpu/gpu/family-specific optimized).
framework_version (str): The ver... |
Validate that the source directory exists and it contains the user script
Args:
script (str): Script filename.
directory (str): Directory containing the source file.
Raises:
ValueError: If ``directory`` does not exist, is not a directory, or does not contain ``script``.
def validate_... |
Package source files and upload a compress tar file to S3. The S3 location will be
``s3://<bucket>/s3_key_prefix/sourcedir.tar.gz``.
If directory is an S3 URI, an UploadedCode object will be returned, but nothing will be
uploaded to S3 (this allows reuse of code already in S3).
If directory is None, th... |
Extract the framework and Python version from the image name.
Args:
image_name (str): Image URI, which should be one of the following forms:
legacy:
'<account>.dkr.ecr.<region>.amazonaws.com/sagemaker-<fw>-<py_ver>-<device>:<container_version>'
legacy:
'<acco... |
Extract the framework version from the image tag.
Args:
image_tag (str): Image tag, which should take the form '<framework_version>-<device>-<py_version>'
Returns:
str: The framework version.
def framework_version_from_tag(image_tag):
"""Extract the framework version from the image tag.
... |
Returns an (s3 bucket, key name/prefix) tuple from a url with an s3 scheme
Args:
url (str):
Returns:
tuple: A tuple containing:
str: S3 bucket name
str: S3 key
def parse_s3_url(url):
"""Returns an (s3 bucket, key name/prefix) tuple from a url with an s3 scheme
... |
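A sketch built on urllib, matching the documented (bucket, key) contract; the error message wording is an assumption:

from urllib.parse import urlparse

def parse_s3_url(url):
    parsed = urlparse(url)
    if parsed.scheme != 's3':
        raise ValueError('Expecting s3 scheme, got: {} in {}'.format(parsed.scheme, url))
    # netloc is the bucket; the path minus its leading slash is the key prefix.
    return parsed.netloc, parsed.path.lstrip('/')

parse_s3_url('s3://my-bucket/models/output.tar.gz')
# ('my-bucket', 'models/output.tar.gz')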
Returns the s3 key prefix for uploading code during model deployment
The location returned is a potential concatenation of 2 parts
1. code_location_key_prefix if it exists
2. model_name or a name derived from the image
Args:
code_location_key_prefix (str): the s3 key prefix from code_l... |
Create a training job in Local Mode
Args:
TrainingJobName (str): local training job name.
AlgorithmSpecification (dict): Identifies the training algorithm to use.
InputDataConfig (dict): Describes the training dataset and the location where it is stored.
OutputDat... |
Describe a local training job.
Args:
TrainingJobName (str): Training job name to describe.
Returns: (dict) DescribeTrainingJob Response.
def describe_training_job(self, TrainingJobName):
"""Describe a local training job.
Args:
TrainingJobName (str): Training j... |
Create a Local Model Object
Args:
ModelName (str): the Model Name
PrimaryContainer (dict): a SageMaker primary container definition
def create_model(self, ModelName, PrimaryContainer, *args, **kwargs): # pylint: disable=unused-argument
"""Create a Local Model Object
A... |
Initialize this Local SageMaker Session.
def _initialize(self, boto_session, sagemaker_client, sagemaker_runtime_client):
"""Initialize this Local SageMaker Session."""
self.boto_session = boto_session or boto3.Session()
self._region_name = self.boto_session.region_name
if self._regio... |
Return a container definition with framework configuration set in model environment variables.
Args:
instance_type (str): The EC2 instance type to deploy this Model to. For example, 'ml.p2.xlarge'.
accelerator_type (str): The Elastic Inference accelerator type to deploy to the instance ... |
Upload the training ``array`` and ``labels`` arrays to ``num_shards`` s3 objects,
stored in "s3://``bucket``/``key_prefix``/".
def upload_numpy_to_s3_shards(num_shards, s3, bucket, key_prefix, array, labels=None):
"""Upload the training ``array`` and ``labels`` arrays to ``num_shards`` s3 objects,
stored i... |
Return docker registry for the given AWS region
Note: Not all the algorithms listed below have an Amazon Estimator implemented. For the full list of
pre-implemented Estimators, see:
https://github.com/aws/sagemaker-python-sdk/tree/master/src/sagemaker/amazon
def registry(region_name, algorithm=None):
... |
Return algorithm image URI for the given AWS region, repository name, and repository version
def get_image_uri(region_name, repo_name, repo_version=1):
"""Return algorithm image URI for the given AWS region, repository name, and repository version"""
repo = '{}:{}'.format(repo_name, repo_version)
return '{... |
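The return statement is cut off after '{. Given the registry() helper above, it plausibly joins the regional registry with the repo:version tag, roughly:

def get_image_uri(region_name, repo_name, repo_version=1):
    '''Return algorithm image URI for the given AWS region, repository name, and repository version'''
    repo = '{}:{}'.format(repo_name, repo_version)
    # Assumed shape: <registry-host>/<repo-name>:<version>
    return '{}/{}'.format(registry(region_name, repo_name), repo)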
Convert the job description to init params that can be handled by the class constructor
Args:
job_details: the returned job details from a describe_training_job API call.
model_channel_name (str): Name of the channel where pre-trained model data will be downloaded.
Returns:
... |
Set hyperparameters needed for training.
Args:
* records (:class:`~RecordSet`): The records to train this ``Estimator`` on.
* mini_batch_size (int or None): The size of each mini-batch to use when training. If ``None``, a
default value will be used.
* job_nam... |
Fit this Estimator on serialized Record objects, stored in S3.
``records`` should be an instance of :class:`~RecordSet`. This defines a collection of
S3 data files to train this ``Estimator`` on.
Training data is expected to be encoded as dense or sparse vectors in the "values" feature
... |
Build a :class:`~RecordSet` from a numpy :class:`~ndarray` matrix and label vector.
For the 2D ``ndarray`` ``train``, each row is converted to a :class:`~Record` object.
The vector is stored in the "values" entry of the ``features`` property of each Record.
If ``labels`` is not None, each corre... |
Return an instance of :class:`sagemaker.local.data.DataSource` that can handle
the provided data_source URI.
data_source can be either file:// or s3://
Args:
data_source (str): a valid URI that points to a data source.
sagemaker_session (:class:`sagemaker.session.Session`): a SageMaker Ses... |
Return an instance of :class:`sagemaker.local.data.Splitter` according to
the specified `split_type`.
Args:
split_type (str): either 'Line' or 'RecordIO'. Can be left as None to signal no data split
will happen.
Returns
:class:`sagemaker.local.data.Splitter`: an instance of a S... |
Return an instance of :class:`sagemaker.local.data.BatchStrategy` according to `strategy`
Args:
strategy (str): Either 'SingleRecord' or 'MultiRecord'
splitter (:class:`sagemaker.local.data.Splitter): splitter to get the data from.
Returns
:class:`sagemaker.local.data.BatchStrategy`: a... |
Retrieve the list of absolute paths to all the files in this data source.
Returns:
List[str] List of absolute paths.
def get_file_list(self):
"""Retrieve the list of absolute paths to all the files in this data source.
Returns:
List[str] List of absolute paths.
... |
Retrieve the absolute path to the root directory of this data source.
Returns:
str: absolute path to the root directory of this data source.
def get_root_dir(self):
"""Retrieve the absolute path to the root directory of this data source.
Returns:
str: absolute path to ... |
Split a file into records using a specific strategy
This RecordIOSplitter splits the data into individual RecordIO records.
Args:
file (str): path to the file to split
Returns: generator for the individual records that were split from the file
def split(self, file):
"""Sp... |
Group together as many records as possible to fit in the specified size
Args:
file (str): file path to read the records from.
size (int): maximum size in MB that each group of records will be fitted to.
passing 0 means unlimited size.
Returns:
genera... |
Group together as many records as possible to fit in the specified size
This SingleRecordStrategy will not group any records and will return them one by one as
long as they are within the maximum size.
Args:
file (str): file path to read the records from.
size (int): max... |
Return hyperparameters used by your custom Chainer code during training.
def hyperparameters(self):
"""Return hyperparameters used by your custom Chainer code during training."""
hyperparameters = super(Chainer, self).hyperparameters()
additional_hyperparameters = {Chainer._use_mpi: self.use_m... |
Create a SageMaker ``ChainerModel`` object that can be deployed to an ``Endpoint``.
Args:
role (str): The ``ExecutionRoleArn`` IAM Role ARN for the ``Model``, which is also used during
transform jobs. If not specified, the role from the Estimator will be used.
model_serv... |
Convert the job description to init params that can be handled by the class constructor
Args:
job_details: the returned job details from a describe_training_job API call.
model_channel_name (str): Name of the channel where pre-trained model data will be downloaded.
Returns:
... |
Writes a numpy array to a dense tensor
def write_numpy_to_dense_tensor(file, array, labels=None):
"""Writes a numpy array to a dense tensor"""
# Validate shape of array and labels, resolve array and label types
if not len(array.shape) == 2:
raise ValueError("Array must be a Matrix")
if labels ... |
Writes a scipy sparse matrix to a sparse tensor
def write_spmatrix_to_sparse_tensor(file, array, labels=None):
"""Writes a scipy sparse matrix to a sparse tensor"""
if not issparse(array):
raise TypeError("Array must be sparse")
# Validate shape of array and labels, resolve array and label types
... |
Eagerly read a collection of Amazon Record protobuf objects from file.
def read_records(file):
"""Eagerly read a collection of amazon Record protobuf objects from file."""
records = []
for record_data in read_recordio(file):
record = Record()
record.ParseFromString(record_data)
reco... |
Writes a single data point as a RecordIO record to the given file.
def _write_recordio(f, data):
"""Writes a single data point as a RecordIO record to the given file."""
length = len(data)
f.write(struct.pack('I', _kmagic))
f.write(struct.pack('I', length))
pad = (((length + 3) >> 2) << 2) - length... |
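The write of the payload and padding is cut off. A sketch completing the visible header-then-pad layout; the magic constant's value is an assumption:

import struct

_kmagic = 0xced7230a  # assumed RecordIO magic number

def _write_recordio(f, data):
    '''Writes a single data point as a RecordIO record to the given file.'''
    length = len(data)
    f.write(struct.pack('I', _kmagic))
    f.write(struct.pack('I', length))
    f.write(data)
    # Zero-pad the payload out to the next four-byte boundary.
    pad = (((length + 3) >> 2) << 2) - length
    f.write(b'\x00' * pad)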
Return a dict created by ``sagemaker.container_def()`` for deploying this model to a specified instance type.
Subclasses can override this to provide custom container definitions for
deployment to a specific instance type. Called by ``deploy()``.
Args:
instance_type (str): The EC2 ... |
Create a SageMaker Model Entity
Args:
instance_type (str): The EC2 instance type that this Model will be used for, this is only
used to determine if the image needs GPU support or not.
accelerator_type (str): Type of Elastic Inference accelerator to attach to an endpoint... |
Compile this ``Model`` with SageMaker Neo.
Args:
target_instance_family (str): Identifies the device that you want to run your model after compilation, for
example: ml_c5. Allowed strings are: ml_c5, ml_m5, ml_c4, ml_m4, jetsontx1, jetsontx2, ml_p2, ml_p3,
deeplens, ... |
Deploy this ``Model`` to an ``Endpoint`` and optionally return a ``Predictor``.
Create a SageMaker ``Model`` and ``EndpointConfig``, and deploy an ``Endpoint`` from this ``Model``.
If ``self.predictor_cls`` is not None, this method returns the result of invoking
``self.predictor_cls`` on the ... |
Return a ``Transformer`` that uses this Model.
Args:
instance_count (int): Number of EC2 instances to use.
instance_type (str): Type of EC2 instance to use, for example, 'ml.c4.xlarge'.
strategy (str): The strategy used to decide how to batch records in a single request (def... |
Return a container definition with framework configuration set in model environment variables.
This also uploads user-supplied code to S3.
Args:
instance_type (str): The EC2 instance type to deploy this Model to. For example, 'ml.p2.xlarge'.
accelerator_type (str): The Elastic ... |
Create a SageMaker Model Entity
Args:
*args: Arguments coming from the caller. This class
does not require any so they are ignored.
def _create_sagemaker_model(self, *args): # pylint: disable=unused-argument
"""Create a SageMaker Model Entity
Args:
*ar... |
Return the inference from the specified endpoint.
Args:
data (object): Input data for which you want the model to provide inference.
If a serializer was specified when creating the RealTimePredictor, the result of the
serializer is sent as input data. Otherwise the d... |
Delete the Amazon SageMaker endpoint backing this predictor. Also delete the endpoint configuration attached
to it if delete_endpoint_config is True.
Args:
delete_endpoint_config (bool, optional): Flag to indicate whether to delete endpoint configuration together
with endpoi... |
Deletes the Amazon SageMaker models backing this predictor.
def delete_model(self):
"""Deletes the Amazon SageMaker models backing this predictor.
"""
request_failed = False
failed_models = []
for model_name in self._model_names:
try:
self.sagemaker_... |
A pandas dataframe with lots of interesting results about this object.
Created by calling SageMaker List and Describe APIs and converting them into
a convenient tabular summary.
Args:
force_refresh (bool): Set to True to fetch the latest data from SageMaker API.
def dataframe(self,... |
Clear the object of all local caches of API methods.
def clear_cache(self):
"""Clear the object of all local caches of API methods.
"""
super(HyperparameterTuningJobAnalytics, self).clear_cache()
self._tuning_job_describe_result = None
self._training_job_summaries = None |