Reads caffe formatted mean file
:param caffe_mean_file: path to caffe mean file, presumably with 'binaryproto' suffix
:return: mean image, converted from BGR to RGB format
def read_caffe_mean(caffe_mean_file):
"""
Reads caffe formatted mean file
:param caffe_mean_file: path to caffe mean file, pres... |
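A hedged sketch of the full reader, assuming a `caffe_parser` module that exposes the compiled `caffe_pb2` protobuf bindings (as MXNet's caffe converter tool does):

```python
import numpy as np
import caffe_parser  # assumed: wraps the compiled caffe_pb2 bindings

def read_caffe_mean(caffe_mean_file):
    """Reads caffe formatted mean file
    :param caffe_mean_file: path to caffe mean file, presumably with 'binaryproto' suffix
    :return: mean image, converted from BGR to RGB format
    """
    mean_blob = caffe_parser.caffe_pb2.BlobProto()
    with open(caffe_mean_file, 'rb') as f:
        mean_blob.ParseFromString(f.read())
    # the blob stores a flat buffer; restore (channels, height, width)
    img_mean_np = np.array(mean_blob.data, dtype=np.float32)
    img_mean_np = img_mean_np.reshape(mean_blob.channels, mean_blob.height, mean_blob.width)
    # Caffe stores BGR; swap the first and last channels to get RGB
    img_mean_np[[0, 2], :, :] = img_mean_np[[2, 0], :, :]
    return img_mean_np
```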
Helper function for margin-based loss. Return a distance matrix given a matrix.
def get_distance(F, x):
"""Helper function for margin-based loss. Return a distance matrix given a matrix."""
n = x.shape[0]
square = F.sum(x ** 2.0, axis=1, keepdims=True)
distance_square = square + square.transpose() - (... |
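The truncated expression is presumably the standard pairwise-distance identity ||a − b||² = ||a||² + ||b||² − 2a·b applied to all row pairs at once; a sketch (with `F = mx.nd`):

```python
import numpy as np
import mxnet as mx

def get_distance(F, x):
    """Helper function for margin-based loss. Return a distance matrix given a matrix."""
    n = x.shape[0]
    square = F.sum(x ** 2.0, axis=1, keepdims=True)
    # ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b for every pair of rows
    distance_square = square + square.transpose() - (2.0 * F.dot(x, x.transpose()))
    # add the identity so sqrt is well-defined on the (zero) diagonal
    return F.sqrt(distance_square + F.array(np.identity(n)))

d = get_distance(mx.nd, mx.nd.random.uniform(shape=(4, 8)))  # (4, 4) distance matrix
```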
cross entropy loss with a mask
def cross_entropy_loss(inputs, labels, rescale_loss=1):
""" cross entropy loss with a mask """
criterion = mx.gluon.loss.SoftmaxCrossEntropyLoss(weight=rescale_loss)
loss = criterion(inputs, labels)
mask = S.var('mask')
loss = loss * S.reshape(mask, shape=(-1,))
r... |
word embedding + LSTM Projected
def rnn(bptt, vocab_size, num_embed, nhid, num_layers, dropout, num_proj, batch_size):
""" word embedding + LSTM Projected """
state_names = []
data = S.var('data')
weight = S.var("encoder_weight", stype='row_sparse')
embed = S.sparse.Embedding(data=data, weight=weig... |
Sampled softmax via importance sampling.
This underestimates the full softmax and is only used for training.
def sampled_softmax(num_classes, num_samples, in_dim, inputs, weight, bias,
sampled_values, remove_accidental_hits=True):
""" Sampled softmax via importance sampling.
... |
Split labels into `num_splits` and
generate candidates based on log-uniform distribution.
def generate_samples(label, num_splits, sampler):
""" Split labels into `num_splits` and
generate candidates based on log-uniform distribution.
"""
def listify(x):
return x if isinstance(x, lis... |
Load & generate training examples from multivariate time series data
:return: data iters & variables required to define network architecture
def build_iters(data_dir, max_records, q, horizon, splits, batch_size):
"""
Load & generate training examples from multivariate time series data
:return: data ite... |
Returns a pre-defined model by name
Parameters
----------
name : str
Name of the model.
pretrained : bool
Whether to load the pretrained weights for model.
classes : int
Number of classes for the output layer.
ctx : Context, default CPU
The context in which to lo... |
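For example, with the Gluon model zoo, which follows this signature:

```python
from mxnet.gluon.model_zoo import vision
# pre-defined model by name, with pretrained weights
net = vision.get_model('resnet18_v1', pretrained=True)
# or randomly initialized with a custom number of output classes
net = vision.get_model('resnet18_v1', classes=10)
```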
Return a new handle with specified storage type, shape, dtype and context.
An empty handle is only used to hold results
Returns
-------
handle
A new empty ndarray handle
def _new_alloc_handle(stype, shape, ctx, delay_alloc, dtype, aux_types, aux_shapes=None):
"""Return a new handle with spec... |
Prepare `source_array` so that it can be used to construct NDArray.
`source_array` is converted to a `np.ndarray` if it's neither an `NDArray` \
nor an `np.ndarray`.
def _prepare_src_array(source_array, dtype):
"""Prepare `source_array` so that it can be used to construct NDArray.
`source_array` is con... |
Prepare the value of dtype if `dtype` is None. If `src_array` is an NDArray, numpy.ndarray
or scipy.sparse.csr.csr_matrix, return src_array.dtype. float32 is returned otherwise.
def _prepare_default_dtype(src_array, dtype):
"""Prepare the value of dtype if `dtype` is None. If `src_array` is an NDArray, numpy.n... |
check s1 == s2 if both are not None
def _check_shape(s1, s2):
"""check s1 == s2 if both are not None"""
if s1 and s2 and s1 != s2:
raise ValueError("Shape mismatch detected. " + str(s1) + " v.s. " + str(s2)) |
Creates a `CSRNDArray`, a 2D array with compressed sparse row (CSR) format.
The CSRNDArray can be instantiated in several ways:
- csr_matrix(D):
to construct a CSRNDArray with a dense 2D array ``D``
- **D** (*array_like*) - An object exposing the array interface, an object whose \
... |
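Two of the documented construction forms, as a short usage sketch:

```python
import numpy as np
import mxnet as mx
# csr_matrix(D): from a dense 2D array
a = mx.nd.sparse.csr_matrix(np.array([[1., 0., 2.], [0., 0., 3.]]))
# csr_matrix((data, indices, indptr)): classic CSR triplet, shape given explicitly
b = mx.nd.sparse.csr_matrix(([1., 2., 3.], [0, 2, 2], [0, 2, 3]), shape=(2, 3))
```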
Create a `CSRNDArray` based on data, indices and indptr
def _csr_matrix_from_definition(data, indices, indptr, shape=None, ctx=None,
dtype=None, indices_type=None, indptr_type=None):
"""Create a `CSRNDArray` based on data, indices and indptr"""
# pylint: disable= no-member, prot... |
Creates a `RowSparseNDArray`, a multidimensional row sparse array with a set of \
tensor slices at given indices.
The RowSparseNDArray can be instantiated in several ways:
- row_sparse_array(D):
to construct a RowSparseNDArray with a dense ndarray ``D``
- **D** (*array_like*) - An object ... |
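Two of the documented construction forms:

```python
import mxnet as mx
# row_sparse_array(D): from a dense ndarray
a = mx.nd.sparse.row_sparse_array(mx.nd.array([[0., 0.], [1., 2.], [0., 0.]]))
# row_sparse_array((data, indices)): slices at rows 1 and 3 of a (4, 2) array
b = mx.nd.sparse.row_sparse_array(([[1., 2.], [3., 4.]], [1, 3]), shape=(4, 2))
```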
Create a `RowSparseNDArray` based on data and indices
def _row_sparse_ndarray_from_definition(data, indices, shape=None, ctx=None,
dtype=None, indices_type=None):
"""Create a `RowSparseNDArray` based on data and indices"""
storage_type = 'row_sparse'
# context
ct... |
Returns element-wise sum of the input arrays with broadcasting.
Equivalent to ``lhs + rhs``, ``mx.nd.broadcast_add(lhs, rhs)`` and
``mx.nd.broadcast_plus(lhs, rhs)`` when shapes of lhs and rhs do not
match. If lhs.shape == rhs.shape, this is equivalent to
``mx.nd.elemwise_add(lhs, rhs)``
.. note::... |
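A quick illustration of the two paths (broadcast vs. element-wise):

```python
import mxnet as mx
x = mx.nd.ones((2, 3))
y = mx.nd.arange(3).reshape((1, 3))
(x + y).asnumpy()           # shapes differ: broadcast_add along the first axis
mx.nd.elemwise_add(x, x)    # shapes match exactly: plain element-wise add
```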
Returns element-wise difference of the input arrays with broadcasting.
Equivalent to ``lhs - rhs``, ``mx.nd.broadcast_sub(lhs, rhs)`` and
``mx.nd.broadcast_minus(lhs, rhs)`` when shapes of lhs and rhs do not
match. If lhs.shape == rhs.shape, this is equivalent to
``mx.nd.elemwise_sub(lhs, rhs)``
.... |
Returns element-wise product of the input arrays with broadcasting.
Equivalent to ``lhs * rhs`` and ``mx.nd.broadcast_mul(lhs, rhs)``
when shapes of lhs and rhs do not match. If lhs.shape == rhs.shape,
this is equivalent to ``mx.nd.elemwise_mul(lhs, rhs)``
.. note::
If the corresp... |
Returns element-wise division of the input arrays with broadcasting.
Equivalent to ``lhs / rhs`` and ``mx.nd.broadcast_div(lhs, rhs)``
when shapes of lhs and rhs do not match. If lhs.shape == rhs.shape,
this is equivalent to ``mx.nd.elemwise_div(lhs, rhs)``
.. note::
If the corresponding dime... |
Return a new array of given shape and type, filled with zeros.
Parameters
----------
stype: string
The storage type of the empty array, such as 'row_sparse', 'csr', etc
shape : int or tuple of int
The shape of the empty array
ctx : Context, optional
An optional device contex... |
Returns a new array of given shape and type, without initializing entries.
Parameters
----------
stype: string
The storage type of the empty array, such as 'row_sparse', 'csr', etc
shape : int or tuple of int
The shape of the empty array.
ctx : Context, optional
An optional ... |
Creates a sparse array from any object exposing the array interface.
Parameters
----------
source_array : RowSparseNDArray, CSRNDArray or scipy.sparse.csr.csr_matrix
The source sparse array
ctx : Context, optional
The default context is ``source_array.context`` if ``source_array`` is an... |
Data-type of the array's ith aux data.
Returns
-------
numpy.dtype
This BaseSparseNDArray's aux data type.
def _aux_type(self, i):
"""Data-type of the array's ith aux data.
Returns
-------
numpy.dtype
This BaseSparseNDArray's aux data ty... |
The data types of the aux data for the BaseSparseNDArray.
def _aux_types(self):
"""The data types of the aux data for the BaseSparseNDArray.
"""
aux_types = []
num_aux = self._num_aux
for i in range(num_aux):
aux_types.append(self._aux_type(i))
return aux_typ... |
Return a copy of the array after casting to a specified type.
Parameters
----------
dtype : numpy.dtype or str
The type of the returned array.
copy : bool
Default `True`. By default, astype always returns a newly
allocated ndarray on the same context.... |
Check whether the NDArray format is valid.
Parameters
----------
full_check : bool, optional
If `True`, rigorous check, O(N) operations. Otherwise
basic check, O(1) operations (default True).
def check_format(self, full_check=True):
"""Check whether the NDArray ... |
A deep copy NDArray of the data array associated with the BaseSparseNDArray.
This function blocks. Do not use it in performance critical code.
def _data(self):
"""A deep copy NDArray of the data array associated with the BaseSparseNDArray.
This function blocks. Do not use it in performance cr... |
Get a deep copy NDArray of the i-th aux data array associated with the
BaseSparseNDArray.
This function blocks. Do not use it in performance critical code.
def _aux_data(self, i):
""" Get a deep copy NDArray of the i-th aux data array associated with the
BaseSparseNDArray.
Thi... |
Returns a ``scipy.sparse.csr.csr_matrix`` object with value copied from this array
Examples
--------
>>> x = mx.nd.sparse.zeros('csr', (2,3))
>>> y = x.asscipy()
>>> type(y)
<type 'scipy.sparse.csr.csr_matrix'>
>>> y
<2x3 sparse matrix of type '<type 'num... |
Return a copy of the array with chosen storage type.
Returns
-------
NDArray or RowSparseNDArray
A copy of the array with the chosen storage stype
def tostype(self, stype):
"""Return a copy of the array with chosen storage type.
Returns
-------
NDAr... |
Copies the value of this array to another array.
If ``other`` is a ``NDArray`` or ``RowSparseNDArray`` object, then ``other.shape``
and ``self.shape`` should be the same. This function copies the value from
``self`` to ``other``.
If ``other`` is a context, a new ``RowSparseNDArray`` wi... |
Exports the MXNet model, passed as a parameter, into an ONNX model.
Accepts both symbol/parameter objects and json/params file paths as input.
Operator support and coverage -
https://cwiki.apache.org/confluence/display/MXNET/MXNet-ONNX+Integration
Parameters
----------
sym : str or ... |
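A minimal usage sketch of the file-path variant (file names and input shape are illustrative):

```python
import numpy as np
from mxnet.contrib import onnx as onnx_mxnet

onnx_file = onnx_mxnet.export_model('model-symbol.json', 'model-0000.params',
                                    [(1, 3, 224, 224)], np.float32, 'model.onnx')
```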
Benchmarking both storage and dot
def bench_dot(lhs_row_dim, lhs_col_dim, rhs_col_dim, density,
rhs_density, dot_func, trans_lhs, lhs_stype,
rhs_stype, only_storage, distribution="uniform"):
""" Benchmarking both storage and dot
"""
lhs_nd = rand_ndarray((lhs_row_dim, lhs_col_di... |
Convert caffe mean
Parameters
----------
binaryproto_fname : str
Filename of the mean
output : str, optional
Save the mean into mxnet's format
Returns
-------
NDArray
Mean in ndarray
def convert_mean(binaryproto_fname, output=None):
"""Convert caffe mean
P... |
r"""Densenet-BC model from the
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_ paper.
Parameters
----------
num_layers : int
Number of layers for the variant of densenet. Options are 121, 161, 169, 201.
pretrained : bool, default False
Whether to... |
Loads the MXNet model file and
returns MXNet symbol and params (weights).
Parameters
----------
json_path : str
Path to the json file
params_path : str
Path to the params file
Returns
-------
sym : MXNet symbol
Model symbol object
params : params object
... |
Helper function to import module
def import_module(module_name):
"""Helper function to import module"""
import sys, os
import importlib
sys.path.append(os.path.dirname(__file__))
return importlib.import_module(module_name) |
Build network symbol for training SSD
Parameters
----------
network : str
base network symbol name
num_classes : int
number of object classes not including background
from_layers : list of str
feature extraction layers; use '' to add extra layers after the last one
For example:
... |
Build network for testing SSD
Parameters
----------
network : str
base network symbol name
num_classes : int
number of object classes not including background
from_layers : list of str
feature extraction layers; use '' to add extra layers after the last one
For example:
from_l... |
This is an internal helper function that can be used for either of these
but not both at the same time:
1. Record the output and gradient of output of an intermediate convolutional layer.
2. Record the gradients of the image.
Parameters
----------
image : NDArray
Image to visualize. This... |
Get the output and gradients of output of a convolutional layer.
Parameters:
----------
net: Block
Network to use for visualization.
image: NDArray
Preprocessed image to use for visualization.
class_id: int
Category ID this image belongs to. If not provided,
network'... |
Get the gradients of the image.
Parameters:
----------
net: Block
Network to use for visualization.
image: NDArray
Preprocessed image to use for visualization.
class_id: int
Category ID this image belongs to. If not provided,
network's prediction will be used.
def g... |
Convert gradients of image obtained using `get_image_grad`
into image. This shows parts of the image that are most strongly activating
the output neurons.
def grad_to_image(gradient):
"""Convert gradients of image obtained using `get_image_grad`
into image. This shows parts of the image that are most str... |
Compute CAM. Refer section 3 of https://arxiv.org/abs/1610.02391 for details
def get_cam(imggrad, conv_out):
"""Compute CAM. Refer section 3 of https://arxiv.org/abs/1610.02391 for details"""
weights = np.mean(imggrad, axis=(1, 2))
cam = np.ones(conv_out.shape[1:], dtype=np.float32)
for i, w in enumera... |
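A sketch of how the truncated loop plausibly continues, following section 3 of the Grad-CAM paper (weighted sum of feature maps, ReLU, then normalization to an 8-bit map):

```python
import numpy as np
import cv2

def get_cam(imggrad, conv_out):
    """Compute CAM. Refer section 3 of https://arxiv.org/abs/1610.02391 for details"""
    weights = np.mean(imggrad, axis=(1, 2))
    cam = np.ones(conv_out.shape[1:], dtype=np.float32)
    # weighted sum of feature maps, with channel-averaged gradients as weights
    for i, w in enumerate(weights):
        cam += w * conv_out[i, :, :]
    cam = cv2.resize(cam, (imggrad.shape[1], imggrad.shape[2]))
    cam = np.maximum(cam, 0)  # ReLU keeps only features with positive influence
    cam = (cam - np.min(cam)) / (np.max(cam) - np.min(cam))
    return np.uint8(cam * 255)
```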
Draw a heatmap on top of the original image using intensities from activation_map
def get_img_heatmap(orig_img, activation_map):
"""Draw a heatmap on top of the original image using intensities from activation_map"""
heatmap = cv2.applyColorMap(activation_map, cv2.COLORMAP_COOL)
heatmap = cv2.cvtColor(heat... |
Convert gradients to grayscale. This gives a saliency map.
def to_grayscale(cv2im):
"""Convert gradients to grayscale. This gives a saliency map."""
# How strongly does each position activate the output
grayscale_im = np.sum(np.abs(cv2im), axis=0)
# Normalize between min and 99th percentile
im_max... |
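A sketch of the rest of the normalization, per the comment above (clip to the min/99th-percentile range, keep a leading channel axis):

```python
import numpy as np

def to_grayscale(cv2im):
    """Convert gradients to grayscale. This gives a saliency map."""
    # How strongly does each position activate the output
    grayscale_im = np.sum(np.abs(cv2im), axis=0)
    # Normalize between min and 99th percentile
    im_max = np.percentile(grayscale_im, 99)
    im_min = np.min(grayscale_im)
    grayscale_im = np.clip((grayscale_im - im_min) / (im_max - im_min), 0, 1)
    return np.expand_dims(grayscale_im, axis=0)
```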
Helper function for checking shape of label and prediction
Parameters
----------
labels : list of `NDArray`
The labels of the data.
preds : list of `NDArray`
Predicted values.
wrap : boolean
If True, wrap labels/preds in a list if they are single NDArray
shape : boole... |
Creates evaluation metric from metric names or instances of EvalMetric
or a custom metric function.
Parameters
----------
metric : str or callable
Specifies the metric to create.
This argument must be one of the below:
- Name of a metric.
- An instance of `EvalMetric`.
... |
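For example, each of the accepted forms:

```python
import numpy as np
import mxnet as mx

acc = mx.metric.create('acc')                      # by name
composite = mx.metric.create(['acc', 'ce'])        # list -> CompositeEvalMetric
mae = mx.metric.create(                            # custom callable(label, pred)
    lambda label, pred: float(np.mean(np.abs(label - pred))))
```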
Creates a custom evaluation metric that receives its inputs as numpy arrays.
Parameters
----------
numpy_feval : callable(label, pred)
Custom evaluation function that receives labels and predictions for a minibatch
as numpy arrays and returns the corresponding custom metric as a floating po... |
Save configurations of metric. Can be recreated
from configs with metric.create(``**config``)
def get_config(self):
"""Save configurations of metric. Can be recreated
from configs with metric.create(``**config``)
"""
config = self._kwargs.copy()
config.update({
... |
Update the internal evaluation with named label and pred
Parameters
----------
labels : OrderedDict of str -> NDArray
name to array mapping for labels.
preds : OrderedDict of str -> NDArray
name to array mapping of predicted outputs.
def update_dict(self, label... |
Resets the internal evaluation result to initial state.
def reset(self):
"""Resets the internal evaluation result to initial state."""
self.num_inst = 0
self.sum_metric = 0.0
self.global_num_inst = 0
self.global_sum_metric = 0.0 |
Gets the current evaluation result.
Returns
-------
names : list of str
Name of the metrics.
values : list of float
Value of the evaluations.
def get(self):
"""Gets the current evaluation result.
Returns
-------
names : list of str... |
Gets the current global evaluation result.
Returns
-------
names : list of str
Name of the metrics.
values : list of float
Value of the evaluations.
def get_global(self):
"""Gets the current global evaluation result.
Returns
-------
... |
Returns zipped name and value pairs.
Returns
-------
list of tuples
A (name, value) tuple list.
def get_name_value(self):
"""Returns zipped name and value pairs.
Returns
-------
list of tuples
A (name, value) tuple list.
"""
... |
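The truncated body presumably just normalizes `self.get()` into lists and zips them; a sketch:

```python
def get_name_value(self):
    """Returns zipped name and value pairs."""
    name, value = self.get()
    if not isinstance(name, list):
        name = [name]
    if not isinstance(value, list):
        value = [value]
    return list(zip(name, value))
```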
Returns zipped name and value pairs for global results.
Returns
-------
list of tuples
A (name, value) tuple list.
def get_global_name_value(self):
"""Returns zipped name and value pairs for global results.
Returns
-------
list of tuples
... |
Update various binary classification counts for a single (label, pred)
pair.
Parameters
----------
label : `NDArray`
The labels of the data.
pred : `NDArray`
Predicted values.
def update_binary_stats(self, label, pred):
"""
Update variou... |
Calculate the Matthews Correlation Coefficient
def matthewscc(self, use_global=False):
"""
Calculate the Matthews Correlation Coefficient
"""
if use_global:
if not self.global_total_examples:
return 0.
true_pos = float(self.global_true_positives)... |
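The method presumably evaluates MCC = (TP·TN − FP·FN) / √((TP+FP)(TP+FN)(TN+FP)(TN+FN)); a sketch assuming the binary counters maintained by `update_binary_stats`:

```python
import math

def matthewscc(self, use_global=False):
    """Calculate the Matthews Correlation Coefficient"""
    if use_global:
        if not self.global_total_examples:
            return 0.
        true_pos = float(self.global_true_positives)
        false_pos = float(self.global_false_positives)
        false_neg = float(self.global_false_negatives)
        true_neg = float(self.global_true_negatives)
    else:
        if not self.total_examples:
            return 0.
        true_pos = float(self.true_positives)
        false_pos = float(self.false_positives)
        false_neg = float(self.false_negatives)
        true_neg = float(self.true_negatives)
    terms = [(true_pos + false_pos), (true_pos + false_neg),
             (true_neg + false_pos), (true_neg + false_neg)]
    # skip zero-valued terms so an empty class does not zero the denominator
    denom = 1.
    for t in filter(lambda t: t != 0., terms):
        denom *= t
    return ((true_pos * true_neg) - (false_pos * false_neg)) / math.sqrt(denom)
```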
Returns a new dataset with each sample transformed by the
transformer function `fn`.
Parameters
----------
fn : callable
A transformer function that takes a sample as input and
returns the transformed sample.
lazy : bool, default True
If False... |
Returns a new dataset with the first element of each sample
transformed by the transformer function `fn`.
This is useful, for example, when you only want to transform data
while keeping label as is.
Parameters
----------
fn : callable
A transformer function ... |
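For example, transforming images while leaving labels untouched:

```python
from mxnet import gluon
from mxnet.gluon.data.vision import transforms

dataset = gluon.data.vision.MNIST(train=True)
# ToTensor is applied to the image only; the label passes through unchanged
transformed = dataset.transform_first(transforms.ToTensor())
```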
Forward the image through the LSTM network model
Parameters
----------
img_ : array of int
Returns
-------
label_list : list of string
def forward_ocr(self, img_):
"""Forward the image through the LSTM network model
Parameters
----------
... |
Return a caffe_pb2.NetParameter object defined in a prototxt file
def read_prototxt(fname):
"""Return a caffe_pb2.NetParameter object defined in a prototxt file
"""
proto = caffe_pb2.NetParameter()
with open(fname, 'r') as f:
text_format.Merge(str(f.read()), proto)
return proto |
Returns layers in a caffe_pb2.NetParameter object
def get_layers(proto):
"""Returns layers in a caffe_pb2.NetParameter object
"""
if len(proto.layer):
return proto.layer
elif len(proto.layers):
return proto.layers
else:
raise ValueError('Invalid proto file.') |
Return a caffe_pb2.NetParameter object defined in a binary
caffemodel file
def read_caffemodel(prototxt_fname, caffemodel_fname):
"""Return a caffe_pb2.NetParameter object defined in a binary
caffemodel file
"""
if use_caffe:
caffe.set_mode_cpu()
net = caffe.Net(prototxt_f... |
Iterate over all layers
def layer_iter(layers, layer_names):
"""Iterate over all layers"""
if use_caffe:
for layer_idx, layer in enumerate(layers):
layer_name = re.sub('[-/]', '_', layer_names[layer_idx])
layer_type = layer.type
layer_blobs = layer.blobs
... |
Set the profiler configuration (only accepts keyword arguments).
Parameters
----------
filename : string,
output file for profile data
profile_all : boolean,
all profile types enabled
profile_symbolic : boolean,
whether to profile symbolic operators
profile_imperative ... |
Set the profiler configuration (deprecated).
Parameters
----------
mode : string, optional
Indicates whether to enable the profiler, can
be 'symbolic', or 'all'. Defaults to `symbolic`.
filename : string, optional
The name of output trace file. Defaults to 'profile.json'.
def... |
Set up the profiler state to 'run' or 'stop'.
Parameters
----------
state : string, optional
Indicates whether to run the profiler, can
be 'stop' or 'run'. Default is `stop`.
profile_process : string
whether to profile kvstore `server` or `worker`.
server can only be pro... |
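A minimal end-to-end profiling session built from the calls documented here (the output file name is illustrative):

```python
import mxnet as mx

mx.profiler.set_config(profile_all=True, aggregate_stats=True, filename='profile.json')
mx.profiler.set_state('run')    # start collecting
x = mx.nd.ones((1000, 1000))
(x * x).wait_to_read()          # some workload to record
mx.nd.waitall()                 # ensure async work finishes before stopping
mx.profiler.set_state('stop')   # stop collecting
print(mx.profiler.dumps())      # aggregate stats as a printable string
```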
Dump profile and stop profiler. Use this to save profile
in advance in case your program cannot exit normally.
Parameters
----------
finished : boolean
Indicates whether to stop statistic output (dumping) after this dump.
Default is True
profile_process : string
whether to p... |
Return a printable string of aggregate profile stats.
Parameters
----------
reset: boolean
Indicates whether to clear the aggregate statistical data collected up to this point
def dumps(reset=False):
"""Return a printable string of aggregate profile stats.
Parameters
----------
reset... |
Pause profiling.
Parameters
----------
profile_process : string
whether to profile kvstore `server` or `worker`.
server can only be profiled when kvstore is of type dist.
if this is not passed, defaults to `worker`
def pause(profile_process='worker'):
"""Pause profiling.
P... |
Resume paused profiling.
Parameters
----------
profile_process : string
whether to profile kvstore `server` or `worker`.
server can only be profiled when kvstore is of type dist.
if this is not passed, defaults to `worker`
def resume(profile_process='worker'):
"""
Resume pa... |
Set counter value.
Parameters
----------
value : int
Value for the counter
def set_value(self, value):
"""Set counter value.
Parameters
----------
value : int
Value for the counter
"""
check_call(_LIB.MXProfileSetCounter(... |
Increment counter value.
Parameters
----------
delta : int
Amount by which to add to the counter
def increment(self, delta=1):
"""Increment counter value.
Parameters
----------
delta : int
Amount by which to add to the coun... |
Decrement counter value.
Parameters
----------
delta : int
Amount by which to subtract from the counter
def decrement(self, delta=1):
"""Decrement counter value.
Parameters
----------
delta : int
Amount by which to subtract... |
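A small usage sketch of the counter API from the three entries above (domain and counter names are illustrative):

```python
import mxnet as mx

domain = mx.profiler.Domain('app')  # counters are grouped under a profiler domain
counter = mx.profiler.Counter(domain, 'processed_batches')
counter.set_value(0)
counter.increment(5)
counter.decrement(1)
```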
Set up the profiler state to record operator.
Parameters
----------
scope : string, optional
Indicates what scope the marker should refer to.
Can be 'global', 'process', 'thread', 'task', and 'marker'
Default is `process`.
def mark(self, scope='process'):
... |
r"""Get CUDA kernel from compiled module.
Parameters
----------
name : str
String name of the kernel.
signature : str
Function signature for the kernel. For example, if a kernel is
declared as::
extern "C" __global__ void axpy(const f... |
Launch cuda kernel.
Parameters
----------
args : tuple of NDArray or numbers
List of arguments for kernel. NDArrays are expected for pointer
types (e.g. `float*`, `double*`) while numbers are expected for
non-pointer types (e.g. `int`, `float`).
ctx :... |
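Putting `get_kernel` and `launch` together, following the `axpy` signature quoted in the docstring above (requires a GPU build of MXNet):

```python
import mxnet as mx

source = r'''
extern "C" __global__ void axpy(const float *x, float *y, float alpha) {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    y[i] += alpha * x[i];
}
'''
module = mx.rtc.CudaModule(source)
kernel = module.get_kernel('axpy', 'const float *x, float *y, float alpha')
x = mx.nd.ones((10,), ctx=mx.gpu(0))
y = mx.nd.zeros((10,), ctx=mx.gpu(0))
# launch(args, ctx, grid_dims, block_dims): one block of 10 threads
kernel.launch((x, y, 3.0), mx.gpu(0), (1, 1, 1), (10, 1, 1))
print(y.asnumpy())  # all 3.0
```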
Clear the internal statistics to initial state.
def reset(self):
"""Clear the internal statistics to initial state."""
if getattr(self, 'num', None) is None:
self.num_inst = 0
self.sum_metric = 0.0
else:
self.num_inst = [0] * self.num
self.sum_met... |
Update internal records. This function now only updates the internal buffer;
sum_metric and num_inst are updated in the _update() function instead, when
get() is called to return results.
Params:
----------
labels: mx.nd.array (n * 6) or (n * 5), difficult column is optional
2... |
update num_inst and sum_metric
def _update(self):
""" update num_inst and sum_metric """
aps = []
for k, v in self.records.items():
recall, prec = self._recall_prec(v, self.counts[k])
ap = self._average_precision(recall, prec)
aps.append(ap)
if se... |
get recall and precision from internal records
def _recall_prec(self, record, count):
""" get recall and precision from internal records """
record = np.delete(record, np.where(record[:, 1].astype(int) == 0)[0], axis=0)
sorted_records = record[record[:,0].argsort()[::-1]]
tp = np.cumsum... |
calculate average precision
Parameters
----------
rec : numpy.array
cumulative recall
prec : numpy.array
cumulative precision
Returns
-------
ap as float
def _average_precision(self, rec, prec):
"""
calculate average precis... |
Insert records according to key
def _insert(self, key, records, count):
""" Insert records according to key """
if key not in self.records:
assert key not in self.counts
self.records[key] = records
self.counts[key] = count
else:
self.records[key] ... |
calculate average precision, overriding the default one with the
special 11-point metric
Parameters
----------
rec : numpy.array
cumulative recall
prec : numpy.array
cumulative precision
Returns
-------
ap as float
def _average_precision(sel... |
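The VOC07-style 11-point interpolation this override describes samples precision at recall thresholds 0.0, 0.1, ..., 1.0 and averages the eleven values; a sketch:

```python
import numpy as np

def _average_precision(self, rec, prec):
    """calculate average precision with the 11-point metric"""
    ap = 0.
    for t in np.arange(0., 1.1, 0.1):
        if np.sum(rec >= t) == 0:
            p = 0.
        else:
            # highest precision at any recall >= t (interpolated precision)
            p = np.max(prec[rec >= t])
        ap += p / 11.
    return ap
```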
symbol: the pre-trained network symbol
arg_params: the argument parameters of the pre-trained model
num_classes: the number of classes for the fine-tune datasets
layer_name: the layer name before the last fully-connected layer
def get_fine_tune_model(symbol, arg_params, num_classes, layer_name, dtype='floa... |
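A common fine-tuning sketch matching this description (the classifier name 'fc_new' is illustrative):

```python
import mxnet as mx

def get_fine_tune_model(symbol, arg_params, num_classes, layer_name, dtype='float32'):
    all_layers = symbol.get_internals()
    net = all_layers[layer_name + '_output']   # cut the network after layer_name
    net = mx.symbol.FullyConnected(data=net, num_hidden=num_classes, name='fc_new')
    if dtype == 'float16':
        net = mx.symbol.Cast(data=net, dtype='float32')  # softmax in fp32 for stability
    net = mx.symbol.SoftmaxOutput(data=net, name='softmax')
    # keep all pretrained weights except those of the replaced classifier
    new_args = {k: v for k, v in arg_params.items() if 'fc_new' not in k}
    return net, new_args
```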
Description : generate list for lip images
def _list_images(self, root):
"""
Description : generate list for lip images
"""
self.labels = []
self.items = []
valid_unseen_sub_idx = [1, 2, 20, 22]
skip_sub_idx = [21]
if self._mode == 'train':
... |
Description : Align to lip position
def align_generation(self, file_nm, padding=75):
"""
Description : Align to lip position
"""
align = Align(self._align_root + '/' + file_nm + '.align')
return nd.array(align.sentence(padding)) |
Switch on/off verbose mode
Parameters
----------
verbose : bool
switch on/off verbose mode
print_func : function
A function that computes statistics of initialized arrays.
Takes an `NDArray` and returns a `str`. Defaults to mean
absolute ... |
Internal verbose print function
Parameters
----------
desc : InitDesc or str
name of the array
init : str
initializer pattern
arr : NDArray
initialized array
def _verbose_print(self, desc, init, arr):
"""Internal verbose print functio... |
Legacy initialization method.
Parameters
----------
name : str
Name of corresponding NDArray.
arr : NDArray
NDArray to be initialized.
def _legacy_init(self, name, arr):
"""Legacy initialization method.
Parameters
----------
nam... |
save imglist to disk
Parameters:
----------
fname : str
saved filename
def save_imglist(self, fname=None, root=None, shuffle=False):
"""
save imglist to disk
Parameters:
----------
fname : str
saved filename
"""
d... |
load class names from text file
Parameters:
----------
filename: str
file stores class names
dirname: str
file directory
def _load_class_names(self, filename, dirname):
"""
load class names from text file
Parameters:
----------
... |
download and read data into numpy
def read_data(label, image):
"""
download and read data into numpy
"""
base_url = 'http://yann.lecun.com/exdb/mnist/'
with gzip.open(download_file(base_url+label, os.path.join('data',label))) as flbl:
magic, num = struct.unpack(">II", flbl.read(8))
... |
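A sketch of the full reader (`download_file` is the example's own helper, assumed here; `np.frombuffer` replaces the deprecated `np.fromstring`):

```python
import gzip
import os
import struct
import numpy as np

def read_data(label, image):
    """download and read data into numpy"""
    base_url = 'http://yann.lecun.com/exdb/mnist/'
    with gzip.open(download_file(base_url + label, os.path.join('data', label))) as flbl:
        magic, num = struct.unpack(">II", flbl.read(8))
        label = np.frombuffer(flbl.read(), dtype=np.int8)
    with gzip.open(download_file(base_url + image, os.path.join('data', image)), 'rb') as fimg:
        magic, num, rows, cols = struct.unpack(">IIII", fimg.read(16))
        image = np.frombuffer(fimg.read(), dtype=np.uint8).reshape(len(label), rows, cols)
    return (label, image)
```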
create data iterator with NDArrayIter
def get_mnist_iter(args, kv):
"""
create data iterator with NDArrayIter
"""
(train_lbl, train_img) = read_data(
'train-labels-idx1-ubyte.gz', 'train-images-idx3-ubyte.gz')
(val_lbl, val_img) = read_data(
't10k-labels-idx1-ubyte.gz', 't10... |
Function factory for file extension argparse assertion
Args:
extension (string): the file extension to assert
Returns:
string: the supplied extension, if assertion is successful.
def make_file_extension_assertion(extension):
"""Function factory for file extension argparse a... |
generates the colormap for visualizing the segmentation mask
Args:
num_colors (int): the number of colors to generate in the output palette
Returns:
list: the generated color palette
def get_palette(num_colors=256):
"""generates the... |
get the (1, 3, h, w) np.array data for the supplied image
Args:
img_path (string): the input image path
Returns:
np.array: image data in a (1, 3, h, w) shape
def get_data(img_path):
"""get the (1, 3, h, w) np.array data for the supplied image... |
Module main execution
def main():
"""Module main execution"""
# Initialization variables - update to change your model and execution context
model_prefix = "FCN8s_VGG16"
epoch = 19
# By default, MXNet will run on the CPU. Change to ctx = mx.gpu() to run on GPU.
ctx = mx.cpu()
fcnxs, fcnxs... |