Partial objects do not serialize correctly in python2.x -- this fixes the bugs
def save_partial(self, obj):
"""Partial objects do not serialize correctly in python2.x -- this fixes the bugs"""
self.save_reduce(_genpartial, (obj.func, obj.args, obj.keywords)) |
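For context, a minimal sketch of the module-level rebuild helper such a reducer pairs with (the name _genpartial matches the reference above; the body is an assumption):

from functools import partial

def _genpartial(func, args, kwds):
    # Rebuild the partial from its pieces; tolerate empty/None slots
    # so payloads pickled without args or keywords still load.
    if not args:
        args = ()
    if not kwds:
        kwds = {}
    return partial(func, *args, **kwds)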
Save a file
def save_file(self, obj):
"""Save a file"""
try:
import StringIO as pystringIO #we can't use cStringIO as it lacks the name attribute
except ImportError:
import io as pystringIO
if not hasattr(obj, 'name') or not hasattr(obj, 'mode'):
ra... |
Hack function for saving numpy ufunc objects
def save_ufunc(self, obj):
"""Hack function for saving numpy ufunc objects"""
name = obj.__name__
numpy_tst_mods = ['numpy', 'scipy.special']
for tst_mod_name in numpy_tst_mods:
tst_mod = sys.modules.get(tst_mod_name, None)
... |
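The truncated loop above scans already-imported modules for one that exposes the ufunc by name. A hedged reconstruction of that scan, using a hypothetical helper name:

import sys

def _find_ufunc_module(name, candidates=('numpy', 'scipy.special')):
    # Return the first imported candidate module that exposes an
    # attribute matching the ufunc's __name__, else None.
    for mod_name in candidates:
        mod = sys.modules.get(mod_name, None)
        if mod is not None and hasattr(mod, name):
            return mod_name
    return None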
Pulls out all the symbols from a descriptor proto.
Args:
desc_proto: The proto to extract symbols from.
package: The package containing the descriptor type.
Yields:
The fully qualified name found in the descriptor.
def _ExtractSymbols(desc_proto, package):
"""Pulls out all the symbols from a descri... |
Adds the FileDescriptorProto and its types to this database.
Args:
file_desc_proto: The FileDescriptorProto to add.
Raises:
DescriptorDatabaseConflictingDefinitionError: if an attempt is made to
add a proto with the same name but different definition than an
existing proto in the d... |
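Typical usage, sketched with a hypothetical file name and package:

from google.protobuf import descriptor_database, descriptor_pb2

db = descriptor_database.DescriptorDatabase()
fdp = descriptor_pb2.FileDescriptorProto(name='my/file.proto', package='my.pkg')
db.Add(fdp)   # raises on a conflicting redefinition of 'my/file.proto'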
Convert a normalizer model to the protobuf spec.
Parameters
----------
model: Normalizer
A Normalizer.
input_features: str
Name of the input column.
output_features: str
Name of the output column.
Returns
-------
model_spec: An object of type Model_pb.
... |
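A hedged usage sketch via the coremltools scikit-learn converter; the column names here are hypothetical:

import numpy as np
from sklearn.preprocessing import Normalizer
from coremltools.converters import sklearn as sklearn_converter

model = Normalizer(norm='l2')
model.fit(np.zeros((1, 3)))   # Normalizer is stateless; fit only validates
mlmodel = sklearn_converter.convert(model, 'data', 'normalized')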
customized submit script that submits nslave jobs, each of which must take args as a parameter
note this can be a lambda function carrying additional input parameters
Parameters
nslave number of slave processes to start up
args arguments to launch each job
this usually includes th... |
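A minimal sketch of such a submit function, assuming plain local subprocesses and a hypothetical worker.py entry point; a real script would target a cluster scheduler:

import subprocess

def custom_submit(nslave, args):
    # Launch nslave local worker processes, each receiving the
    # per-job arguments.
    return [subprocess.Popen(['python', 'worker.py'] + list(args))
            for _ in range(nslave)]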
Consume byte-code
def consume(self):
'''
Consume byte-code
'''
generic_consume = getattr(self, 'generic_consume', None)
for instr in disassembler(self.code):
method_name = 'consume_%s' % (instr.opname)
method = getattr(self, method_name, generic_... |
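The same dispatch pattern, sketched against Python 3's dis module (the original targets a different disassembler API):

import dis

class BytecodeWalker(object):
    def consume(self, code):
        # Route each instruction to consume_<OPNAME> when defined,
        # falling back to generic_consume if present.
        generic = getattr(self, 'generic_consume', None)
        for instr in dis.get_instructions(code):
            handler = getattr(self, 'consume_%s' % instr.opname, generic)
            if handler is not None:
                handler(instr)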
Create a (binary or multi-class) classifier model of type
:class:`~turicreate.random_forest_classifier.RandomForestClassifier` using
an ensemble of decision trees trained on subsets of the data.
Parameters
----------
dataset : SFrame
A training dataset containing feature columns and a targe... |
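A minimal usage sketch with a toy two-class SFrame:

import turicreate as tc

data = tc.SFrame({'feature': [1.0, 2.0, 3.0, 4.0],
                  'label':   [0, 1, 0, 1]})
model = tc.random_forest_classifier.create(data, target='label')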
Return a classification, for each example in the ``dataset``, using the
trained random forest model. The output SFrame contains predictions
as class labels (0 or 1) and probabilities associated with the example.
Parameters
----------
dataset : SFrame
Dataset of n... |
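Continuing the sketch above, classification pairs each predicted class with its probability:

predictions = model.classify(data)   # columns: class, probability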
Get the right converter function for Keras
def _get_layer_converter_fn(layer):
"""Get the right converter function for Keras
"""
layer_type = type(layer)
if layer_type in _KERAS_LAYER_REGISTRY:
return _KERAS_LAYER_REGISTRY[layer_type]
else:
raise TypeError("Keras layer of type %s is... |
Convert a Keras model to Core ML protobuf specification (.mlmodel).
Parameters
----------
model: Keras model object | str | (str, str)
A trained Keras neural network model which can be one of the following:
- a Keras model object
- a string with the path to a Keras model file (h5)
... |
Convert a Keras model to Core ML protobuf specification (.mlmodel).
Parameters
----------
model: Keras model object | str | (str, str)
A trained Keras neural network model which can be one of the following:
- a Keras model object
- a string with the path to a Keras model file (h5)... |
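A hedged usage sketch against the older coremltools Keras converter; the file path and feature names are hypothetical:

import coremltools

mlmodel = coremltools.converters.keras.convert(
    'model.h5', input_names='image', output_names='probabilities')
mlmodel.save('Model.mlmodel')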
Convert a boosted tree model to protobuf format.
Parameters
----------
decision_tree : RandomForestRegressor
A trained scikit-learn tree model.
feature_names: [str]
Name of the input columns.
target: str
Name of the output column.
Returns
-------
model_spec: A... |
Create a DBSCAN clustering model. The DBSCAN method partitions the input
dataset into three types of points, based on the estimated probability
density at each point.
- **Core** points have a large number of points within a given neighborhood.
Specifically, `min_core_neighbors` must be within distanc... |
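A minimal usage sketch; the radius and min_core_neighbors values below are illustrative, not defaults:

import turicreate as tc

sf = tc.SFrame({'x': [0.10, 0.15, 0.12, 5.0],
                'y': [0.20, 0.21, 0.22, 5.0]})
model = tc.dbscan.create(sf, radius=0.5, min_core_neighbors=2)
labels = model.cluster_id   # per-point cluster assignments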
Find the path to the xgboost dynamic library files.
Returns
-------
lib_path: list(string)
List of all found xgboost library paths
def find_lib_path():
"""Find the path to the xgboost dynamic library files.
Returns
-------
lib_path: list(string)
List of all found library... |
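A sketch of what this discovery usually looks like: probe candidate directories for the platform's shared-library name (the directory layout here is an assumption):

import os

def find_lib_path():
    # Collect every existing candidate path for the xgboost shared
    # library across common platforms.
    curr_dir = os.path.dirname(os.path.abspath(__file__))
    dll_dirs = [curr_dir, os.path.join(curr_dir, '../../lib/')]
    names = ['libxgboost.so', 'libxgboost.dylib', 'xgboost.dll']
    return [os.path.join(d, n)
            for d in dll_dirs for n in names
            if os.path.exists(os.path.join(d, n))]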
Check if a model is of the right type. Raise error if not.
Parameters
----------
model: model
Any scikit-learn model
expected_type: Type
Expected type of the scikit-learn model.
def check_expected_type(model, expected_type):
"""Check if a model is of the right type. Raise error if not.
... |
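A minimal sketch of the guard this describes:

def check_expected_type(model, expected_type):
    # Raise a TypeError naming both the expected and actual types.
    if not isinstance(model, expected_type):
        raise TypeError("Expected model of type %r, got %r"
                        % (expected_type, type(model)))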
Convert a LIBSVM model to Core ML format.
Parameters
----------
model: a libsvm model (C-SVC, nu-SVC, epsilon-SVR, or nu-SVR)
or string path to a saved model.
input_names: str | [str]
Name of the input column(s).
If a single string is used (the default) the input will be an ar... |
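A hedged usage sketch; 'svm.model' is a hypothetical path to a saved LIBSVM model:

from coremltools.converters import libsvm as libsvm_converter

mlmodel = libsvm_converter.convert('svm.model')
mlmodel.save('SVM.mlmodel')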
Appends an item to the list. Similar to list.append().
def append(self, value):
"""Appends an item to the list. Similar to list.append()."""
self._values.append(self._type_checker.CheckValue(value))
if not self._message_listener.dirty:
self._message_listener.Modified() |
Inserts the item at the specified position. Similar to list.insert().
def insert(self, key, value):
"""Inserts the item at the specified position. Similar to list.insert()."""
self._values.insert(key, self._type_checker.CheckValue(value))
if not self._message_listener.dirty:
self._message_listener.Mo... |
Extends by appending the given iterable. Similar to list.extend().
def extend(self, elem_seq):
"""Extends by appending the given iterable. Similar to list.extend()."""
if elem_seq is None:
return
try:
elem_seq_iter = iter(elem_seq)
except TypeError:
if not elem_seq:
# silentl... |
Appends the contents of another repeated field of the same type to this
one. We do not check the types of the individual fields.
def MergeFrom(self, other):
"""Appends the contents of another repeated field of the same type to this
one. We do not check the types of the individual fields.
"""
self._... |
Removes an item from the list. Similar to list.remove().
def remove(self, elem):
"""Removes an item from the list. Similar to list.remove()."""
self._values.remove(elem)
self._message_listener.Modified() |
Removes and returns an item at a given index. Similar to list.pop().
def pop(self, key=-1):
"""Removes and returns an item at a given index. Similar to list.pop()."""
value = self._values[key]
self.__delitem__(key)
return value |
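These scalar-container operations mirror their list counterparts; a quick demonstration on FieldMask.paths, a repeated string field:

from google.protobuf import field_mask_pb2

mask = field_mask_pb2.FieldMask()
mask.paths.append('a')
mask.paths.extend(['b', 'c'])
mask.paths.insert(1, 'a.b')     # ['a', 'a.b', 'b', 'c']
mask.paths.remove('c')
last = mask.paths.pop()         # 'b'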
Adds a new element at the end of the list and returns it. Keyword
arguments may be used to initialize the element.
def add(self, **kwargs):
"""Adds a new element at the end of the list and returns it. Keyword
arguments may be used to initialize the element.
"""
new_element = self._message_descripto... |
Extends by appending the given sequence of elements of the same type
as this one, copying each individual message.
def extend(self, elem_seq):
"""Extends by appending the given sequence of elements of the same type
as this one, copying each individual message.
"""
message_class = self._message_desc... |
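For composite containers, add() constructs the new element in place and keyword arguments initialize its fields:

from google.protobuf import descriptor_pb2

fdp = descriptor_pb2.FileDescriptorProto()
msg = fdp.message_type.add(name='MyMessage')   # a new DescriptorProto
fdp.message_type.extend([descriptor_pb2.DescriptorProto(name='Other')])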
Returns the elements of B that are not in A.
def difference (b, a):
""" Returns the elements of B that are not in A.
"""
a = set(a)
result = []
for item in b:
if item not in a:
result.append(item)
return result |
Removes from set1 any items which don't appear in set2 and returns the result.
def intersection (set1, set2):
""" Removes from set1 any items which don't appear in set2 and returns the result.
"""
assert is_iterable(set1)
assert is_iterable(set2)
result = []
for v in set1:
if v in set2:... |
Returns true iff all elements of 'small' exist in 'large'.
def contains (small, large):
""" Returns true iff all elements of 'small' exist in 'large'.
"""
small = to_seq (small)
large = to_seq (large)
for s in small:
if not s in large:
return False
return True |
Returns True iff 'a' contains the same elements as 'b', irrespective of their order.
# TODO: Python 2.4 has a proper set class.
def equal (a, b):
""" Returns True iff 'a' contains the same elements as 'b', irrespective of their order.
# TODO: Python 2.4 has a proper set class.
"""
assert is... |
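Quick demonstrations of these sequence helpers:

>>> difference([1, 2, 3], [2])
[1, 3]
>>> intersection([1, 2, 3], [2, 3, 4])
[2, 3]
>>> contains([2, 3], [1, 2, 3])
True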
Annotate your images loaded in either an SFrame or SArray format
The annotate util is a GUI assisted application used to create labels in
SArray Image data. Specifying a column, with dtype Image, in an SFrame
works as well since SFrames are composed of multiple SArrays.
When the GUI is... |
Recover the last annotated SFrame.
If you annotate an SFrame and forget to assign it to a variable, this
function allows you to recover the last annotated SFrame.
Returns
-------
out : SFrame
A new SFrame that contains the recovered annotation data.... |
Convert a DictVectorizer model to the protobuf spec.
Parameters
----------
model: DictVectorizer
A fitted DictVectorizer model.
input_features: str
Name of the input column.
output_features: str
Name of the output column.
Returns
-------
model_spec: An object ... |
Internal function.
This function is called via a callback returning from IPC to Cython
to Python. It tries to perform incremental printing to IPython Notebook or
Jupyter Notebook and when all else fails, just prints locally.
def print_callback(val):
"""
Internal function.
This function is call... |
Internal function to execute toolkit on the turicreate server.
Parameters
----------
toolkit_name : string
The name of the toolkit.
options : dict
A map containing the required input for the toolkit function,
for example: {'graph': g, 'reset_prob': 0.15}.
verbose : bool
... |
Truncates the remainder part after division.
def _RoundTowardZero(value, divider):
"""Truncates the remainder part after division."""
# For some languages, the sign of the remainder is implementation
# dependent if any of the operands is negative. Here we enforce
# "rounded toward zero" semantics. For example... |
Checks whether the path is valid for Message Descriptor.
def _IsValidPath(message_descriptor, path):
"""Checks whether the path is valid for Message Descriptor."""
parts = path.split('.')
last = parts.pop()
for name in parts:
field = message_descriptor.fields_by_name[name]
if (field is None or
... |
Raises ValueError if message is not a FieldMask.
def _CheckFieldMaskMessage(message):
"""Raises ValueError if message is not a FieldMask."""
message_descriptor = message.DESCRIPTOR
if (message_descriptor.name != 'FieldMask' or
message_descriptor.file.name != 'google/protobuf/field_mask.proto'):
raise V... |
Converts a path name from snake_case to camelCase.
def _SnakeCaseToCamelCase(path_name):
"""Converts a path name from snake_case to camelCase."""
result = []
after_underscore = False
for c in path_name:
if c.isupper():
raise Error('Fail to print FieldMask to Json string: Path name '
... |
Converts a field name from camelCase to snake_case.
def _CamelCaseToSnakeCase(path_name):
"""Converts a field name from camelCase to snake_case."""
result = []
for c in path_name:
if c == '_':
raise ParseError('Fail to parse FieldMask: Path name '
'{0} must not contain "_"s.'.for... |
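A sketch of this reverse conversion, matching the error behavior the docstring implies (the exception type here is an assumption):

def camel_case_to_snake_case(path_name):
    # Reject underscores outright, then lower-case each capital
    # letter with a '_' prefix.
    result = []
    for c in path_name:
        if c == '_':
            raise ValueError('Fail to parse FieldMask: Path name '
                             '%s must not contain "_"s.' % path_name)
        if c.isupper():
            result.append('_')
            result.append(c.lower())
        else:
            result.append(c)
    return ''.join(result)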
Merge all fields specified by a sub-tree from source to destination.
def _MergeMessage(
node, source, destination, replace_message, replace_repeated):
"""Merge all fields specified by a sub-tree from source to destination."""
source_descriptor = source.DESCRIPTOR
for name in node:
child = node[name]
... |
Adds the field paths descended from node to field_mask.
def _AddFieldPaths(node, prefix, field_mask):
"""Adds the field paths descended from node to field_mask."""
if not node:
field_mask.paths.append(prefix)
return
for name in sorted(node):
if prefix:
child_path = prefix + '.' + name
else:... |
Packs the specified message into current Any message.
def Pack(self, msg, type_url_prefix='type.googleapis.com/'):
"""Packs the specified message into current Any message."""
if len(type_url_prefix) < 1 or type_url_prefix[-1] != '/':
self.type_url = '%s/%s' % (type_url_prefix, msg.DESCRIPTOR.full_name)
... |
Unpacks the current Any message into specified message.
def Unpack(self, msg):
"""Unpacks the current Any message into specified message."""
descriptor = msg.DESCRIPTOR
if not self.Is(descriptor):
return False
msg.ParseFromString(self.value)
return True |
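A Pack/Unpack round trip, using a well-known type so the example is self-contained:

from google.protobuf import any_pb2, timestamp_pb2

ts = timestamp_pb2.Timestamp(seconds=1)
box = any_pb2.Any()
box.Pack(ts)                 # type_url ends with .../google.protobuf.Timestamp

out = timestamp_pb2.Timestamp()
assert box.Unpack(out)       # True: the type URL matches
assert out.seconds == 1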
Converts Timestamp to RFC 3339 date string format.
Returns:
A string converted from timestamp. The string is always Z-normalized
and uses 3, 6 or 9 fractional digits as required to represent the
exact time. Example of the return format: '1972-01-01T10:00:20.021Z'
def ToJsonString(self):
"""C... |
Parse an RFC 3339 date string format to Timestamp.
Args:
value: A date string. Any fractional digits (or none) and any offset are
accepted as long as they fit into nano-seconds precision.
Example of accepted format: '1972-01-01T10:00:20.021-05:00'
Raises:
ParseError: On parsing ... |
Converts nanoseconds since epoch to Timestamp.
def FromNanoseconds(self, nanos):
"""Converts nanoseconds since epoch to Timestamp."""
self.seconds = nanos // _NANOS_PER_SECOND
self.nanos = nanos % _NANOS_PER_SECOND |
Converts microseconds since epoch to Timestamp.
def FromMicroseconds(self, micros):
"""Converts microseconds since epoch to Timestamp."""
self.seconds = micros // _MICROS_PER_SECOND
self.nanos = (micros % _MICROS_PER_SECOND) * _NANOS_PER_MICROSECOND |
Converts milliseconds since epoch to Timestamp.
def FromMilliseconds(self, millis):
"""Converts milliseconds since epoch to Timestamp."""
self.seconds = millis // _MILLIS_PER_SECOND
self.nanos = (millis % _MILLIS_PER_SECOND) * _NANOS_PER_MILLISECOND |
Converts Timestamp to datetime.
def ToDatetime(self):
"""Converts Timestamp to datetime."""
return datetime.utcfromtimestamp(
self.seconds + self.nanos / float(_NANOS_PER_SECOND)) |
Converts datetime to Timestamp.
def FromDatetime(self, dt):
"""Converts datetime to Timestamp."""
td = dt - datetime(1970, 1, 1)
self.seconds = td.seconds + td.days * _SECONDS_PER_DAY
self.nanos = td.microseconds * _NANOS_PER_MICROSECOND |
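These Timestamp conversions compose naturally; a brief round trip:

from datetime import datetime
from google.protobuf import timestamp_pb2

ts = timestamp_pb2.Timestamp()
ts.FromDatetime(datetime(2020, 1, 1))
print(ts.ToJsonString())      # 2020-01-01T00:00:00Z
ts.FromMilliseconds(1500)     # seconds=1, nanos=500000000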
Converts a Duration to microseconds.
def ToMicroseconds(self):
"""Converts a Duration to microseconds."""
micros = _RoundTowardZero(self.nanos, _NANOS_PER_MICROSECOND)
return self.seconds * _MICROS_PER_SECOND + micros |
Converts a Duration to milliseconds.
def ToMilliseconds(self):
"""Converts a Duration to milliseconds."""
millis = _RoundTowardZero(self.nanos, _NANOS_PER_MILLISECOND)
return self.seconds * _MILLIS_PER_SECOND + millis |
Converts microseconds to Duration.
def FromMicroseconds(self, micros):
"""Converts microseconds to Duration."""
self._NormalizeDuration(
micros // _MICROS_PER_SECOND,
(micros % _MICROS_PER_SECOND) * _NANOS_PER_MICROSECOND) |
Converts milliseconds to Duration.
def FromMilliseconds(self, millis):
"""Converts milliseconds to Duration."""
self._NormalizeDuration(
millis // _MILLIS_PER_SECOND,
(millis % _MILLIS_PER_SECOND) * _NANOS_PER_MILLISECOND) |
Converts Duration to timedelta.
def ToTimedelta(self):
"""Converts Duration to timedelta."""
return timedelta(
seconds=self.seconds, microseconds=_RoundTowardZero(
self.nanos, _NANOS_PER_MICROSECOND)) |
Converts timedelta to Duration.
def FromTimedelta(self, td):
"""Converts timedelta to Duration."""
self._NormalizeDuration(td.seconds + td.days * _SECONDS_PER_DAY,
td.microseconds * _NANOS_PER_MICROSECOND) |
Set Duration from seconds and nanos.
def _NormalizeDuration(self, seconds, nanos):
"""Set Duration from seconds and nanos."""
# Force nanos to be negative if the duration is negative.
if seconds < 0 and nanos > 0:
seconds += 1
nanos -= _NANOS_PER_SECOND
self.seconds = seconds
self.nanos = n... |
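And the Duration counterparts, exercising the same normalization via a timedelta round trip:

from datetime import timedelta
from google.protobuf import duration_pb2

d = duration_pb2.Duration()
d.FromTimedelta(timedelta(days=1, microseconds=250))
assert d.ToMicroseconds() == 86400 * 10**6 + 250
assert d.ToTimedelta() == timedelta(days=1, microseconds=250)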
Converts FieldMask to string according to proto3 JSON spec.
def ToJsonString(self):
"""Converts FieldMask to string according to proto3 JSON spec."""
camelcase_paths = []
for path in self.paths:
camelcase_paths.append(_SnakeCaseToCamelCase(path))
return ','.join(camelcase_paths) |
Checks whether the FieldMask is valid for Message Descriptor.
def IsValidForDescriptor(self, message_descriptor):
"""Checks whether the FieldMask is valid for Message Descriptor."""
for path in self.paths:
if not _IsValidPath(message_descriptor, path):
return False
return True |
Sets this FieldMask to all direct fields of Message Descriptor.
def AllFieldsFromDescriptor(self, message_descriptor):
"""Sets this FieldMask to all direct fields of Message Descriptor."""
self.Clear()
for field in message_descriptor.fields:
self.paths.append(field.name) |
Merges mask1 and mask2 into this FieldMask.
def Union(self, mask1, mask2):
"""Merges mask1 and mask2 into this FieldMask."""
_CheckFieldMaskMessage(mask1)
_CheckFieldMaskMessage(mask2)
tree = _FieldMaskTree(mask1)
tree.MergeFromFieldMask(mask2)
tree.ToFieldMask(self) |
Intersects mask1 and mask2 into this FieldMask.
def Intersect(self, mask1, mask2):
"""Intersects mask1 and mask2 into this FieldMask."""
_CheckFieldMaskMessage(mask1)
_CheckFieldMaskMessage(mask2)
tree = _FieldMaskTree(mask1)
intersection = _FieldMaskTree()
for path in mask2.paths:
tree.I... |
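Union and Intersect in action; note that intersecting 'foo' with 'foo.bar' keeps the narrower path:

from google.protobuf import field_mask_pb2

a = field_mask_pb2.FieldMask(paths=['foo.bar', 'baz'])
b = field_mask_pb2.FieldMask(paths=['foo', 'quux'])

out = field_mask_pb2.FieldMask()
out.Union(a, b)        # paths: baz, foo, quux
out.Intersect(a, b)    # paths: foo.bar
print(out.ToJsonString())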
Merges fields specified in FieldMask from source to destination.
Args:
source: Source message.
destination: The destination message to be merged into.
replace_message_field: Replace message field if True. Merge message
field if False.
replace_repeated_field: Replace repeated field... |
Adds a field path into the tree.
If the field path to add is a sub-path of an existing field path
in the tree (i.e., a leaf node), it means the tree already matches
the given path so nothing will be added to the tree. If the path
matches an existing non-leaf node in the tree, that non-leaf node
wil... |
Calculates the intersection part of a field path with this tree.
Args:
path: The field path to calculate.
intersection: The output tree to record the intersection part.
def IntersectPath(self, path, intersection):
"""Calculates the intersection part of a field path with this tree.
Args:
p... |
Adds the leaf nodes that begin with prefix to this tree.
def AddLeafNodes(self, prefix, node):
"""Adds the leaf nodes that begin with prefix to this tree."""
if not node:
self.AddPath(prefix)
for name in node:
child_path = prefix + '.' + name
self.AddLeafNodes(child_path, node[name]) |
Merge all fields specified by this tree from source to destination.
def MergeMessage(
self, source, destination,
replace_message, replace_repeated):
"""Merge all fields specified by this tree from source to destination."""
_MergeMessage(
self._root, source, destination, replace_message, rep... |
Configures a new resource compilation command specific to a condition,
usually a toolset selection condition. The possible options are:
* <rc-type>(rc|windres) - Indicates the type of options the command
accepts.
Even though the arguments are all optional, only when a command... |
Convert a Nu Support Vector Regression (NuSVR) model to the protobuf spec.
Parameters
----------
model: NuSVR
A trained NuSVR encoder model.
feature_names: [str]
Name of the input columns.
target: str
Name of the output column.
Returns
-------
model_spec: An ob... |
Create a :class:`~turicreate.linear_regression.LinearRegression` to
predict a scalar target variable as a linear function of one or more
features. In addition to standard numeric and categorical types, features
can also be extracted automatically from list- or dictionary-type SFrame
columns.
The li... |
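A toy usage sketch:

import turicreate as tc

data = tc.SFrame({'sqft':  [1000., 1500., 2000., 2500.],
                  'price': [300.,  450.,  600.,  750.]})
model = tc.linear_regression.create(data, target='price', features=['sqft'])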
Export the model in Core ML format.
Parameters
----------
filename: str
A valid filename where the model can be saved.
Examples
--------
>>> model.export_coreml("MyModel.mlmodel")
def export_coreml(self, filename):
"""
Export the model in Core... |
Return target value predictions for ``dataset``, using the trained
linear regression model. This method can be used to get fitted values
for the model by inputting the training dataset.
Parameters
----------
dataset : SFrame | pandas.Dataframe
Dataset of new observat... |
r"""Evaluate the model by making target value predictions and comparing
to actual values.
Two metrics are used to evaluate linear regression models. The first
is root-mean-squared error (RMSE) while the second is the absolute
value of the maximum error between the actual and predicted ... |
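Continuing the sketch above, prediction and evaluation on the training data:

predictions = model.predict(data)   # fitted values
results = model.evaluate(data)      # includes 'rmse' and 'max_error'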
Convert array into a sequence of successive, possibly overlapping frames.
An n-dimensional array of shape (num_samples, ...) is converted into an
(n+1)-D array of shape (num_frames, window_length, ...), where each frame
starts hop_length points after the preceding one.
This is accomplished using stride_tricks,... |
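A sketch of that stride_tricks framing, mirroring the common implementation (treat as illustrative):

import numpy as np

def frame(data, window_length, hop_length):
    # Build a (num_frames, window_length, ...) view without copying;
    # each frame starts hop_length samples after the previous one.
    num_samples = data.shape[0]
    num_frames = 1 + (num_samples - window_length) // hop_length
    shape = (num_frames, window_length) + data.shape[1:]
    strides = (data.strides[0] * hop_length,) + data.strides
    return np.lib.stride_tricks.as_strided(data, shape=shape, strides=strides)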
Calculate a "periodic" Hann window.
The classic Hann window is defined as a raised cosine that starts and
ends on zero, and where every value appears twice, except the middle
point for an odd-length window. Matlab calls this a "symmetric" window
and np.hanning() returns it. However, for Fourier analysis, thi... |
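A sketch of the periodic variant, which simply omits the final repeated zero sample:

import numpy as np

def periodic_hann(window_length):
    # Unlike np.hanning, the cosine period equals window_length, so
    # the window tiles seamlessly from frame to frame.
    return 0.5 - (0.5 * np.cos(2 * np.pi / window_length *
                               np.arange(window_length)))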
Calculate the short-time Fourier transform magnitude.
Args:
signal: 1D np.array of the input time-domain signal.
fft_length: Size of the FFT to apply.
hop_length: Advance (in samples) between each frame passed to FFT.
window_length: Length of each block of samples to pass to FFT.
Returns:
2D n... |
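Composing the two sketches above gives the usual STFT-magnitude pipeline:

import numpy as np

def stft_magnitude(signal, fft_length, hop_length, window_length):
    # Frame, window, then take the magnitude of the real FFT.
    frames = frame(signal, window_length, hop_length)
    window = periodic_hann(window_length)
    return np.abs(np.fft.rfft(frames * window, int(fft_length)))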
Return a matrix that can post-multiply spectrogram rows to make mel.
Returns a np.array matrix A that can be used to post-multiply a matrix S of
spectrogram values (STFT magnitudes) arranged as frames x bins to generate a
"mel spectrogram" M of frames x num_mel_bins. M = S A.
The classic HTK algorithm exploi... |
Convert waveform to a log magnitude mel-frequency spectrogram.
Args:
data: 1D np.array of waveform data.
audio_sample_rate: The sampling rate of data.
log_offset: Add this to values when taking log to avoid -Infs.
window_length_secs: Duration of each window to analyze.
hop_length_secs: Advance be... |
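The last step, sketched with a hypothetical mel_matrix built per the M = S A description above:

import numpy as np

def log_mel(spectrogram, mel_matrix, log_offset=0.01):
    # Project STFT magnitudes onto mel bins, then log-compress with
    # a small offset to avoid -inf on silence.
    return np.log(np.dot(spectrogram, mel_matrix) + log_offset)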
Creates a random SFrame with `num_rows` rows and randomly
generated column types determined by `column_codes`. The output
SFrame is deterministic based on `random_seed`.
`column_types` is a string with each character denoting one type
of column, with the output SFrame having one column for each
... |
Creates a random SFrame with `num_rows` rows and randomly
generated column types determined by `column_codes`. The output
SFrame is deterministic based on `random_seed`. In addition, a
target column is generated with values dependent on the randomly
generated features in a given row.
`column_ty... |
Creates a random SFrame with `num_rows` rows and randomly
generated column types determined by `column_codes`. The output
SFrame is deterministic based on `random_seed`. In addition, a
target column is generated with values dependent on the randomly
generated features in a given row.
`column_ty... |
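A hedged usage sketch; the exact column-code letters are assumptions, so consult the full docstring's code table:

from turicreate.util import generate_random_sframe

# Deterministic for a fixed seed; 'nnn' assumes 'n' denotes a numeric column.
sf = generate_random_sframe(num_rows=100, column_codes='nnn', random_seed=0)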
Input:
spec : mlmodel spec
input_shape_dict: dictionary of string --> tuple
string: input name
tuple: input shape as a 5 length tuple in order (Seq, Batch, C, H, W)
If input_shape_dict is not provided, input shapes are inferred from the input descr... |
Convert a svm model to the protobuf spec.
This currently supports:
* C-SVC
* nu-SVC
* Epsilon-SVR
* nu-SVR
Parameters
----------
model_path: libsvm_model
Libsvm representation of the model.
feature_names : [str] | str
Names of each of the features.
targ... |
Create an :class:`ActivityClassifier` model.
Parameters
----------
dataset : SFrame
Input data which consists of `sessions` of data where each session is
a sequence of data. The data must be in `stacked` format, grouped by
session. Within each session, the data is assumed to be sort... |
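A hedged usage sketch on a toy two-session SFrame; real use needs substantially more data, and the column names are hypothetical:

import turicreate as tc

train_data = tc.SFrame({'session':  [1, 1, 1, 1, 2, 2, 2, 2],
                        'accel_x':  [0.1, 0.2, 0.1, 0.3, 1.1, 1.0, 1.2, 1.1],
                        'activity': ['walk'] * 4 + ['run'] * 4})
model = tc.activity_classifier.create(train_data, session_id='session',
                                      target='activity', prediction_window=2)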
Encode targets to integers in [0, num_classes - 1]
def _encode_target(data, target, mapping=None):
""" Encode targets to integers in [0, num_classes - 1] """
if mapping is None:
mapping = {t: i for i, t in enumerate(sorted(data[target].unique()))}
data[target] = data[target].apply(lambda t: mappin... |
Export the model in Core ML format.
Parameters
----------
filename: str
A valid filename where the model can be saved.
Examples
--------
>>> model.export_coreml("MyModel.mlmodel")
def export_coreml(self, filename):
"""
Export the model in Core... |
Return predictions for ``dataset``, using the trained activity classifier.
Predictions can be generated as class labels, or as a probability
vector with probabilities for each class.
The activity classifier generates a single prediction for each
``prediction_window`` rows in ``dataset``... |
Evaluate the model by making predictions of target values and comparing
these to actual values.
Parameters
----------
dataset : SFrame
Dataset of new observations. Must include columns with the same
names as the session_id, target and features used for model trai... |
Return a classification, for each ``prediction_window`` examples in the
``dataset``, using the trained activity classification model. The output
SFrame contains predictions as both class labels and probabilities
that the predicted value is the associated label.
Parameters
... |
Return top-k predictions for the ``dataset``, using the trained model.
Predictions are returned as an SFrame with three columns: `prediction_id`,
`class`, and `probability`, or `rank`, depending on the ``output_type``
parameter.
Parameters
----------
dataset : SFrame
... |
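Continuing the sketch above, the prediction variants side by side:

labels = model.predict(train_data, output_type='class')
probs = model.predict(train_data, output_type='probability_vector')
table = model.classify(train_data)            # class plus probability
top2 = model.predict_topk(train_data, k=2)    # prediction_id/class/probability
metrics = model.evaluate(train_data)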
Count the occurrences of the different characters in the files
def count_characters(root, out):
"""Count the occurrences of the different characters in the files"""
if os.path.isfile(root):
with open(root, 'rb') as in_f:
for line in in_f:
for char in line:
... |
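A sketch of the full traversal, accumulating per-byte counts into the out dict (the directory recursion is an assumption based on the root parameter's name):

import os

def count_characters(root, out):
    # Tally bytes in a file, or recurse across a directory tree.
    if os.path.isfile(root):
        with open(root, 'rb') as in_f:
            for line in in_f:
                for char in line:
                    out[char] = out.get(char, 0) + 1
    elif os.path.isdir(root):
        for name in os.listdir(root):
            count_characters(os.path.join(root, name), out)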
The main function of the script
def main():
"""The main function of the script"""
desc = 'Generate character statistics from a source tree'
parser = argparse.ArgumentParser(description=desc)
parser.add_argument(
'--src',
dest='src',
required=True,
help='The root of the s... |
Save a protobuf model specification to file.
Parameters
----------
spec: Model_pb
Protobuf representation of the model
filename: str
File path where the spec gets saved.
Examples
--------
.. sourcecode:: python
>>> coremltools.utils.save_spec(spec, 'HousePricer.m... |
Load a protobuf model specification from file
Parameters
----------
filename: str
Location on disk (a valid filepath) from which the file is loaded
as a protobuf spec.
Returns
-------
model_spec: Model_pb
Protobuf representation of the model
Examples
--------
... |
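A round-trip sketch pairing the two utilities; the file names are hypothetical:

import coremltools

spec = coremltools.utils.load_spec('HousePricer.mlmodel')
coremltools.utils.save_spec(spec, 'HousePricerCopy.mlmodel')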
Returns a list of neural network layers if the model contains any.
Parameters
----------
spec: Model_pb
A model protobuf specification.
Returns
-------
[NN layer]
list of all layers (including layers from elements of a pipeline)
def _get_nn_layers(spec):
"""
Returns a l... |
Evaluate a CoreML regression model and compare against predictions
from the original framework (for testing correctness of conversion)
Parameters
----------
filename: [str | MLModel]
File path from which to load the MLModel (OR) a loaded version of
MLModel.
data: [str | Datafr... |
Evaluate a CoreML classifier model and compare against predictions
from the original framework (for testing correctness of conversion). Use
this evaluation for models that don't deal with probabilities.
Parameters
----------
filename: [str | MLModel]
File path from which to load the model (... |
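A hedged usage sketch of the two evaluators; the paths and target names are hypothetical:

import coremltools

reg_metrics = coremltools.utils.evaluate_regressor(
    'HousePricer.mlmodel', 'house_data.csv', target='price')
clf_metrics = coremltools.utils.evaluate_classifier(
    'Classifier.mlmodel', 'test_data.csv', target='label')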
Evaluate a classifier specification for testing.
Parameters
----------
filename: [str | Model]
File path from which to load the model (OR) a loaded
version of the MLModel.
data: [str | Dataframe]
Test data on which to evaluate the models (dataframe,
or path to a csv fil... |
Rename a feature in the specification.
Parameters
----------
spec: Model_pb
The specification containing the feature to rename.
current_name: str
Current name of the feature. If this feature doesn't exist, the rename
is a no-op.
new_name: str
New name of the featur... |
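A usage sketch, assuming a loaded spec and hypothetical feature names:

import coremltools

spec = coremltools.utils.load_spec('HousePricer.mlmodel')
coremltools.utils.rename_feature(spec, 'bedrooms', 'bedroom_count')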