Validate label and its shape.
def _check_valid_label(self, label):
"""Validate label and its shape."""
if len(label.shape) != 2 or label.shape[1] < 5:
msg = "Label with shape (1+, 5+) required, %s received." % str(label)
raise RuntimeError(msg)
valid_label = np.where(np.... |
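The filtering step above is cut off at np.where. A minimal sketch of what such validation typically looks like, assuming the detection label column order [id, xmin, ymin, xmax, ymax]; the function and variable names here are illustrative, not the original implementation:

import numpy as np

def check_valid_label(label):
    """Sketch: keep rows with a non-negative class id and ordered box corners."""
    if len(label.shape) != 2 or label.shape[1] < 5:
        msg = "Label with shape (1+, 5+) required, %s received." % str(label)
        raise RuntimeError(msg)
    # assumed column order: id, xmin, ymin, xmax, ymax
    valid = np.where(np.logical_and.reduce([label[:, 0] >= 0,
                                            label[:, 3] > label[:, 1],
                                            label[:, 4] > label[:, 2]]))[0]
    if valid.size < 1:
        raise RuntimeError("Invalid label: no valid objects found.")
    return label[valid, :]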
Helper function to estimate label shape
def _estimate_label_shape(self):
"""Helper function to estimate label shape"""
max_count = 0
self.reset()
try:
while True:
label, _ = self.next_sample()
label = self._parse_label(label)
m... |
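The loop above is truncated mid-statement. A plausible completion, assuming next_sample raises StopIteration at the end of the data and that the widest parsed label determines the padded shape (the 5-column width is an assumption matching the [id, xmin, ymin, xmax, ymax] layout):

def estimate_label_shape(det_iter):
    """Sketch: one pass over the iterator to find the max per-image object count."""
    max_count = 0
    det_iter.reset()
    try:
        while True:
            label, _ = det_iter.next_sample()
            label = det_iter._parse_label(label)
            max_count = max(max_count, label.shape[0])
    except StopIteration:
        pass
    det_iter.reset()
    return (max_count, 5)  # assumed 5 columns: id, xmin, ymin, xmax, ymax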
Helper function to parse object detection label.
Format for raw label:
n \t k \t ... \t [id \t xmin \t ymin \t xmax \t ymax \t ...] \t [repeat]
where n is the width of header, 2 or larger
k is the width of each object annotation, can be arbitrary, at least 5
def _parse_label(self, label... |
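A worked example makes the flat layout concrete; the values below are made up for illustration, with the minimal header of just n and k:

import numpy as np

# header width n=2 (just n and k), object width k=5, then two objects
raw = np.array([2, 5,
                0, 0.10, 0.20, 0.40, 0.60,   # id, xmin, ymin, xmax, ymax
                1, 0.50, 0.10, 0.90, 0.80])

n = int(raw[0])                   # width of the header
k = int(raw[1])                   # width of each object annotation
objects = raw[n:].reshape(-1, k)  # one row per object
print(objects.shape)              # (2, 5)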
Reshape iterator for data_shape or label_shape.
Parameters
----------
data_shape : tuple or None
Reshape the data_shape to the new shape if not None
label_shape : tuple or None
Reshape label shape to new shape if not None
def reshape(self, data_shape=None, label... |
Override the helper function for batchifying data
def _batchify(self, batch_data, batch_label, start=0):
"""Override the helper function for batchifying data"""
i = start
batch_size = self.batch_size
try:
while i < batch_size:
label, s = self.next_sample()
... |
Override the function for returning next batch.
def next(self):
"""Override the function for returning next batch."""
batch_size = self.batch_size
c, h, w = self.data_shape
# if last batch data is rolled over
if self._cache_data is not None:
# check both the data and... |
Override: transforms input data with specified augmentations.
def augmentation_transform(self, data, label): # pylint: disable=arguments-differ
"""Override: transforms input data with specified augmentations."""
for aug in self.auglist:
data, label = aug(data, label)
return (data, la... |
Checks if the new label shape is valid
def check_label_shape(self, label_shape):
"""Checks if the new label shape is valid"""
if not len(label_shape) == 2:
raise ValueError('label_shape should have length 2')
if label_shape[0] < self.label_shape[0]:
msg = 'Attempts to re... |
Display next image with bounding boxes drawn.
Parameters
----------
color : tuple
Bounding box color in RGB, use None for random color
thickness : int
Bounding box border thickness
mean : True or numpy.ndarray
Compensate for the mean to have b... |
Synchronize label shape with the input iterator. This is useful when
train/validation iterators have different label padding.
Parameters
----------
it : ImageDetIter
The other iterator to synchronize
verbose : bool
Print verbose log if true
Retur... |
Generate anchor (reference) windows by enumerating aspect ratios X
scales wrt a reference (0, 0, 15, 15) window.
def _generate_base_anchors(base_size, scales, ratios):
"""
Generate anchor (reference) windows by enumerating aspect ratios X
scales wrt a reference (0, 0, 15, 15) window.
... |
Return width, height, x center, and y center for an anchor (window).
def _whctrs(anchor):
"""
Return width, height, x center, and y center for an anchor (window).
"""
w = anchor[2] - anchor[0] + 1
h = anchor[3] - anchor[1] + 1
x_ctr = anchor[0] + 0.5 * (w - 1)
y_... |
Given a vector of widths (ws) and heights (hs) around a center
(x_ctr, y_ctr), output a set of anchors (windows).
def _mkanchors(ws, hs, x_ctr, y_ctr):
"""
Given a vector of widths (ws) and heights (hs) around a center
(x_ctr, y_ctr), output a set of anchors (windows).
"""
... |
Enumerate a set of anchors for each aspect ratio wrt an anchor.
def _ratio_enum(anchor, ratios):
"""
Enumerate a set of anchors for each aspect ratio wrt an anchor.
"""
w, h, x_ctr, y_ctr = AnchorGenerator._whctrs(anchor)
size = w * h
size_ratios = size / ratios
... |
Enumerate a set of anchors for each scale wrt an anchor.
def _scale_enum(anchor, scales):
"""
Enumerate a set of anchors for each scale wrt an anchor.
"""
w, h, x_ctr, y_ctr = AnchorGenerator._whctrs(anchor)
ws = w * scales
hs = h * scales
anchors = AnchorGenerat... |
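These four helpers are the standard Faster R-CNN anchor enumeration. A self-contained sketch that mirrors the reference implementation (the 3-scale/3-ratio defaults below are the usual choice and an assumption, not necessarily what this module configures):

import numpy as np

def _whctrs(anchor):
    # width, height, and center of an (x1, y1, x2, y2) anchor
    w = anchor[2] - anchor[0] + 1
    h = anchor[3] - anchor[1] + 1
    x_ctr = anchor[0] + 0.5 * (w - 1)
    y_ctr = anchor[1] + 0.5 * (h - 1)
    return w, h, x_ctr, y_ctr

def _mkanchors(ws, hs, x_ctr, y_ctr):
    # build (x1, y1, x2, y2) anchors around a shared center
    ws = ws[:, np.newaxis]
    hs = hs[:, np.newaxis]
    return np.hstack((x_ctr - 0.5 * (ws - 1), y_ctr - 0.5 * (hs - 1),
                      x_ctr + 0.5 * (ws - 1), y_ctr + 0.5 * (hs - 1)))

def _ratio_enum(anchor, ratios):
    # same area, different aspect ratios
    w, h, x_ctr, y_ctr = _whctrs(anchor)
    size_ratios = w * h / ratios
    ws = np.round(np.sqrt(size_ratios))
    hs = np.round(ws * ratios)
    return _mkanchors(ws, hs, x_ctr, y_ctr)

def _scale_enum(anchor, scales):
    # same aspect ratio, different scales
    w, h, x_ctr, y_ctr = _whctrs(anchor)
    return _mkanchors(w * scales, h * scales, x_ctr, y_ctr)

def generate_base_anchors(base_size=16, scales=np.array([8, 16, 32]),
                          ratios=np.array([0.5, 1, 2])):
    base_anchor = np.array([1, 1, base_size, base_size]) - 1  # (0, 0, 15, 15)
    ratio_anchors = _ratio_enum(base_anchor, ratios)
    return np.vstack([_scale_enum(ratio_anchors[i, :], scales)
                      for i in range(ratio_anchors.shape[0])])

print(generate_base_anchors().shape)  # (9, 4): 3 ratios x 3 scales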
set actual shape of data
def prepare_data(args):
"""
set actual shape of data
"""
rnn_type = args.config.get("arch", "rnn_type")
num_rnn_layer = args.config.getint("arch", "num_rnn_layer")
num_hidden_rnn_list = json.loads(args.config.get("arch", "num_hidden_rnn_list"))
batch_size = args.conf... |
define deep speech 2 network
def arch(args, seq_len=None):
"""
define deep speech 2 network
"""
if isinstance(args, argparse.Namespace):
mode = args.config.get("common", "mode")
is_bucketing = args.config.getboolean("arch", "is_bucketing")
if mode == "train" or is_bucketing:
... |
Description : run lipnet training code using argument info
def main():
"""
Description : run lipnet training code using argument info
"""
parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', type=int, default=64)
parser.add_argument('--epochs', type=int, default=100)
parse... |
visualize [cls, conf, x1, y1, x2, y2]
def vis_detection(im_orig, detections, class_names, thresh=0.7):
"""visualize [cls, conf, x1, y1, x2, y2]"""
import matplotlib.pyplot as plt
import random
plt.imshow(im_orig)
colors = [(random.random(), random.random(), random.random()) for _ in class_names]
... |
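The drawing loop is truncated after the color setup. A hedged completion using matplotlib patches; the exact label placement and line width are my choices:

import random
import matplotlib.pyplot as plt
import matplotlib.patches as patches

def vis_detection_sketch(im_orig, detections, class_names, thresh=0.7):
    """Sketch: draw each [cls, conf, x1, y1, x2, y2] row above the threshold."""
    plt.imshow(im_orig)
    colors = [(random.random(), random.random(), random.random()) for _ in class_names]
    for cls, conf, x1, y1, x2, y2 in detections:
        if conf < thresh:
            continue
        cls = int(cls)
        rect = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, fill=False,
                                 edgecolor=colors[cls], linewidth=2)
        plt.gca().add_patch(rect)
        plt.gca().text(x1, y1 - 2, '%s %.3f' % (class_names[cls], conf),
                       color=colors[cls])
    plt.show()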
Check the difference between predictions from MXNet and CoreML.
def check_error(model, path, shapes, output = 'softmax_output', verbose = True):
"""
Check the difference between predictions from MXNet and CoreML.
"""
coreml_model = _coremltools.models.MLModel(path)
input_data = {}
input_data_co... |
Description : set up GPU or CPU contexts
def setting_ctx(num_gpus):
"""
Description : set up GPU or CPU contexts
"""
if num_gpus > 0:
ctx = [mx.gpu(i) for i in range(num_gpus)]
else:
ctx = [mx.cpu()]
return ctx |
Description : apply beam search for prediction result
def char_beam_search(out):
"""
Description : apply beam search for prediction result
"""
out_conv = list()
for idx in range(out.shape[0]):
probs = out[idx]
prob = probs.softmax().asnumpy()
line_string_proposals = ctcBeamS... |
Description : build network
def build_model(self, dr_rate=0, path=None):
"""
Description : build network
"""
#set network
self.net = LipNet(dr_rate)
self.net.hybridize()
self.net.initialize(ctx=self.ctx)
if path is not None:
self.load_model(p... |
Description : save parameter of network weight
def save_model(self, epoch, loss):
"""
Description : save parameter of network weight
"""
prefix = 'checkpoint/epoches'
file_name = "{prefix}_{epoch}_loss_{l:.4f}".format(prefix=prefix,
... |
Description : Setup the dataloader
def load_dataloader(self):
"""
Description : Setup the dataloader
"""
input_transform = transforms.Compose([transforms.ToTensor(), \
transforms.Normalize((0.7136, 0.4906, 0.3283), \
... |
Description : training for LipNet
def train(self, data, label, batch_size):
"""
Description : training for LipNet
"""
# pylint: disable=no-member
sum_losses = 0
len_losses = 0
with autograd.record():
losses = [self.loss_fn(self.net(X), Y) for X, Y in ... |
Description : Print sentence for prediction result
def infer(self, input_data, input_label):
"""
Description : Print sentence for prediction result
"""
sum_losses = 0
len_losses = 0
for data, label in zip(input_data, input_label):
pred = self.net(data)
... |
Description : training for LipNet
def train_batch(self, dataloader):
"""
Description : training for LipNet
"""
sum_losses = 0
len_losses = 0
for input_data, input_label in tqdm(dataloader):
data = gluon.utils.split_and_load(input_data, self.ctx, even_split=Fa... |
Description : inference for LipNet
def infer_batch(self, dataloader):
"""
Description : inference for LipNet
"""
sum_losses = 0
len_losses = 0
for input_data, input_label in dataloader:
data = gluon.utils.split_and_load(input_data, self.ctx, even_split=False)... |
Description : Run training for LipNet
def run(self, epochs):
"""
Description : Run training for LipNet
"""
best_loss = sys.maxsize
for epoch in trange(epochs):
iter_no = 0
## train
sum_losses, len_losses = self.train_batch(self.train_dataload... |
Sample from independent categorical distributions
Each batch is an independent categorical distribution.
Parameters
----------
prob : numpy.ndarray
Probability of the categorical distribution. Shape --> (batch_num, category_num)
rng : numpy.random.RandomState
Returns
-------
ret... |
Sample from independent normal distributions
Each element is an independent normal distribution.
Parameters
----------
mean : numpy.ndarray
Means of the normal distribution. Shape --> (batch_num, sample_dim)
var : numpy.ndarray
Variance of the normal distribution. Shape --> (batch_num,... |
Sample from independent mixture of gaussian (MoG) distributions
Each batch is an independent MoG distribution.
Parameters
----------
prob : numpy.ndarray
mixture probability of each gaussian. Shape --> (batch_num, center_num)
mean : numpy.ndarray
mean of each gaussian. Shape --> (batch... |
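The three samplers compose: a MoG draw first picks a component index per batch element from the mixture probabilities, then samples from that component's normal. A NumPy sketch under that reading; the function names and the exact (batch_num, center_num, sample_dim) shapes of mean/var are assumptions:

import numpy as np

def sample_categorical(prob, rng):
    # one categorical draw per row of prob; returns shape (batch_num,)
    cdf = np.cumsum(prob, axis=1)
    u = rng.uniform(size=(prob.shape[0], 1))
    return np.minimum((u > cdf).sum(axis=1), prob.shape[1] - 1)

def sample_normal(mean, var, rng):
    # independent normal draw per element
    return mean + np.sqrt(var) * rng.normal(size=mean.shape)

def sample_mog(prob, mean, var, rng):
    # prob: (batch_num, center_num); mean, var: (batch_num, center_num, sample_dim)
    idx = sample_categorical(prob, rng)
    rows = np.arange(prob.shape[0])
    return sample_normal(mean[rows, idx], var[rows, idx], rng)

rng = np.random.RandomState(0)
prob = np.array([[0.3, 0.7], [0.9, 0.1]])
mean = np.zeros((2, 2, 4))
var = np.ones((2, 2, 4))
print(sample_mog(prob, mean, var, rng).shape)  # (2, 4)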
NCE-Loss layer under subword-units input.
def nce_loss_subwords(
data, label, label_mask, label_weight, embed_weight, vocab_size, num_hidden):
"""NCE-Loss layer under subword-units input.
"""
# get subword-units embedding.
label_units_embed = mx.sym.Embedding(data=label,
... |
Download the BSDS500 dataset and return train and test iters.
def get_dataset(prefetch=False):
"""Download the BSDS500 dataset and return train and test iters."""
if path.exists(data_dir):
print(
"Directory {} already exists, skipping.\n"
"To force download and extraction, dele... |
Run evaluation on CPU.
def evaluate(mod, data_iter, epoch, log_interval):
"""Run evaluation on CPU."""
start = time.time()
total_L = 0.0
nbatch = 0
density = 0
mod.set_states(value=0)
for batch in data_iter:
mod.forward(batch, is_train=False)
outputs = mod.get_outputs(merg... |
get two lists, each containing two elements: name and nd.array value
def _read(self):
"""get two lists, each containing two elements: name and nd.array value"""
_, data_img_name, label_img_name = self.f.readline().strip('\n').split("\t")
data = {}
label = {}
data[self.data_n... |
return one dict which contains "data" and "label"
def next(self):
"""return one dict which contains "data" and "label" """
if self.iter_next():
self.data, self.label = self._read()
return {self.data_name : self.data[0][1],
self.label_name : self.label[0][1... |
Convert from onnx operator to mxnet operator.
The converter must specify conversions explicitly for incompatible name, and
apply handlers to operator attributes.
Parameters
----------
node_name : str
name of the node to be translated.
op_name : ... |
Construct symbol from onnx graph.
Parameters
----------
graph : onnx protobuf object
The loaded onnx graph
Returns
-------
sym :symbol.Symbol
The returned mxnet symbol
params : dict
A dict of name: nd.array pairs, used as pret... |
Get the model metadata from a given onnx graph.
def get_graph_metadata(self, graph):
"""
Get the model metadata from a given onnx graph.
"""
_params = set()
for tensor_vals in graph.initializer:
_params.add(tensor_vals.name)
input_data = []
for graph... |
Construct SymbolBlock from onnx graph.
Parameters
----------
graph : onnx protobuf object
The loaded onnx graph
ctx : Context or list of Context
Loads the model into one or many context(s).
Returns
-------
sym_block :gluon.nn.SymbolBlock
... |
Grab data in TensorProto and convert to numpy array.
def _parse_array(self, tensor_proto):
"""Grab data in TensorProto and convert to numpy array."""
try:
from onnx.numpy_helper import to_array
except ImportError:
raise ImportError("Onnx and protobuf need to be installed... |
Convert a list of AttributeProto to a dict, with names as keys.
def _parse_attr(self, attr_proto):
"""Convert a list of AttributeProto to a dict, with names as keys."""
attrs = {}
for a in attr_proto:
for f in ['f', 'i', 's']:
if a.HasField(f):
at... |
Reshapes both modules for new input shapes.
Parameters
----------
data_shapes : list of (str, tuple)
Typically is ``data_iter.provide_data``.
label_shapes : list of (str, tuple)
Typically is ``data_iter.provide_label``.
def reshape(self, data_shapes, label_shape... |
Installs and initializes SVRGOptimizer. The SVRGOptimizer is a wrapper class for a regular optimizer that is
passed in and a special AssignmentOptimizer to accumulate the full gradients. If KVStore is 'local' or None,
the full gradients will be accumulated locally without pushing to the KVStore. Otherw... |
Helper function to create a svrg optimizer. SVRG optimizer encapsulates two optimizers and
will redirect update() to the correct optimizer based on the key.
Parameters
----------
kvstore : str or KVStore
Default `'local'`.
optimizer: str
Name for SVRGOpti... |
Binds the symbols to construct executors for both modules. This is necessary before one
can perform computation with the SVRGModule.
Parameters
----------
data_shapes : list of (str, tuple)
Typically is ``data_iter.provide_data``.
label_shapes : list of (str, tup... |
Forward computation for both modules. It supports data batches with different shapes, such as
different batch sizes or different image sizes.
If reshaping of data batch relates to modification of symbol or module, such as
changing image layout ordering or switching from training to predictin... |
Backward computation.
See Also
----------
:meth:`BaseModule.backward`.
Parameters
----------
out_grads : NDArray or list of NDArray, optional
Gradient on the outputs to be propagated back.
This parameter is only needed when bind is called
... |
Computes the gradients over all data w.r.t weights of past
m epochs. For distributed env, it will accumulate full grads in the kvstore.
Parameters
----------
train_data: DataIter
Train data iterator
def update_full_grads(self, train_data):
"""Computes the gradients ... |
Accumulate gradients over all data in the KVStore. In distributed setting, each worker sees a portion of
data. The full gradients will be aggregated from each worker in the KVStore.
Parameters
----------
key: int or str
Key in the KVStore.
value: NDArray, RowSparseN... |
Allocate average of full gradients accumulated in the KVStore to each device.
Parameters
----------
key: int or str
Key in the kvstore.
value: List of NDArray, List of RowSparseNDArray
A list of average of the full gradients in the KVStore.
def _allocate_gradie... |
Calculates the gradient based on the SVRG update rule.
Parameters
----------
g_curr_batch_curr_weight : NDArray
gradients of current weight of self.mod w.r.t current batch of data
g_curr_batch_special_weight: NDArray
gradients of the weight of past m epochs of sel... |
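In the usual SVRG notation these terms combine as g_i(w) - g_i(w~) + mu, where w~ is the snapshot weight of a past epoch and mu is the full-data gradient at w~. The first two parameter names below come from the docstring above; the third is my guess for the truncated one:

def svrg_gradient(g_curr_batch_curr_weight,
                  g_curr_batch_special_weight,
                  g_special_weight_all_batch):
    # variance-reduced gradient: correct the noisy current-batch gradient
    # with the snapshot's batch gradient and its full-data average
    return (g_curr_batch_curr_weight
            - g_curr_batch_special_weight
            + g_special_weight_all_batch)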
Calculates gradients based on the SVRG update rule.
def _update_svrg_gradients(self):
"""Calculates gradients based on the SVRG update rule.
"""
param_names = self._exec_group.param_names
for ctx in range(self._ctx_len):
for index, name in enumerate(param_names):
... |
Trains the module parameters.
Parameters
----------
train_data : DataIter
Train DataIter.
eval_data : DataIter
If not ``None``, will be used as validation set and the performance
after each epoch will be evaluated.
eval_metric : str or EvalMet... |
Prepares two modules for processing a data batch.
Usually involves switching bucket and reshaping.
For modules that contain `row_sparse` parameters in KVStore,
it prepares the `row_sparse` parameters based on the sparse_row_id_fn.
When KVStore is used to update parameters for multi-dev... |
find out which indexes correspond to given image set (train or val)
Parameters:
----------
shuffle : boolean
whether to shuffle the image list
Returns:
----------
entire list of images specified in the setting
def _load_image_set_index(self, shuffle):
... |
given image index, find out annotation path
Parameters:
----------
index: int
index of a specific image
Returns:
----------
full path of annotation file
def _label_path_from_index(self, index):
"""
given image index, find out annotation path... |
preprocess all ground-truths
Returns:
----------
labels packed in [num_images x max_num_objects x 5] tensor
def _load_image_labels(self):
"""
preprocess all ground-truths
Returns:
----------
labels packed in [num_images x max_num_objects x 5] tensor
... |
Get registrator function.
Parameters
----------
base_class : type
base class for classes that will be registered
nickname : str
nickname of base_class for logging
Returns
-------
a registrator function
def get_register_func(base_class, nickname):
"""Get registrator fun... |
Get registrator function that allows aliases.
Parameters
----------
base_class : type
base class for classes that will be registered
nickname : str
nickname of base_class for logging
Returns
-------
a registrator function
def get_alias_func(base_class, nickname):
"""Get... |
Get creator function
Parameters
----------
base_class : type
base class for classes that will be registered
nickname : str
nickname of base_class for logging
Returns
-------
a creator function
def get_create_func(base_class, nickname):
"""Get creator function
Para... |
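All three factory functions share one closure pattern: capture a registry keyed by lowercase class name and return a function that mutates it. A minimal sketch of the register side; the registry layout is an assumption, and MXNet's actual helpers additionally handle aliases and overwrite warnings:

def get_register_func(base_class, nickname):
    """Sketch: return a decorator that records subclasses of base_class."""
    registry = {}

    def register(klass):
        assert issubclass(klass, base_class), \
            "Can only register subclasses of %s (%s)" % (base_class.__name__, nickname)
        registry[klass.__name__.lower()] = klass
        return klass

    register.registry = registry  # exposed so a matching create func can look up names
    return register

class Optimizer:
    pass

register_optimizer = get_register_func(Optimizer, 'optimizer')

@register_optimizer
class SGD(Optimizer):
    pass

print(register_optimizer.registry)  # {'sgd': <class '__main__.SGD'>}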
Parse arguments.
def parse_args():
"""Parse arguments."""
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='Diagnose script for checking the current system.')
choices = ['python', 'pip', 'mxnet', 'os', 'hardware', 'network']
for choic... |
Tokenization/string cleaning for all datasets except for SST.
Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py
def clean_str(string):
"""Tokenization/string cleaning for all datasets except for SST.
Original taken from https://github.com/yoonkim/CNN_sentence/blob/mast... |
Loads MR polarity data from files, splits the data into words and generates labels.
Returns split sentences and labels.
def load_data_and_labels():
"""Loads MR polarity data from files, splits the data into words and generates labels.
Returns split sentences and labels.
"""
# Load data from files
... |
Pads all sentences to the same length. The length is defined by the longest sentence.
Returns padded sentences.
def pad_sentences(sentences, padding_word="</s>"):
"""Pads all sentences to the same length. The length is defined by the longest sentence.
Returns padded sentences.
"""
sequence_length =... |
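The function body is cut off right after computing sequence_length; a plausible completion in the same style, assuming each sentence is a list of tokens:

def pad_sentences(sentences, padding_word="</s>"):
    """Pad every tokenized sentence to the length of the longest one."""
    sequence_length = max(len(s) for s in sentences)
    return [s + [padding_word] * (sequence_length - len(s)) for s in sentences]

print(pad_sentences([["a", "b"], ["c"]]))  # [['a', 'b'], ['c', '</s>']]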
Maps sentences and labels to vectors based on a vocabulary.
def build_input_data(sentences, labels, vocabulary):
"""Maps sentences and labels to vectors based on a vocabulary."""
x = np.array([[vocabulary[word] for word in sentence] for sentence in sentences])
y = np.array(labels)
return [x, y] |
Map sentences and labels to vectors based on a pretrained word2vec
def build_input_data_with_word2vec(sentences, labels, word2vec_list):
"""
Map sentences and labels to vectors based on a pretrained word2vec
"""
x_vec = []
for sent in sentences:
vec = []
for word in sent:
... |
Loads and preprocesses data for the MR dataset.
Returns input vectors, labels, vocabulary, and inverse vocabulary.
def load_data_with_word2vec(word2vec_list):
"""Loads and preprocesses data for the MR dataset.
Returns input vectors, labels, vocabulary, and inverse vocabulary.
"""
# Load and preproc... |
Loads and preprocesses data for the MR dataset.
Returns input vectors, labels, vocabulary, and inverse vocabulary.
def load_data():
"""Loads and preprocesses data for the MR dataset.
Returns input vectors, labels, vocabulary, and inverse vocabulary.
"""
# Load and preprocess data
sentences, lab... |
Generates a batch iterator for a dataset.
def batch_iter(data, batch_size, num_epochs):
"""Generates a batch iterator for a dataset."""
data = np.array(data)
data_size = len(data)
num_batches_per_epoch = int((len(data) - 1) / batch_size) + 1
for epoch in range(num_epochs):
# Shuffle the data at eac... |
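A sketch of the full generator: shuffle once per epoch, then yield contiguous slices. The ceil-style batch count above avoids yielding an empty trailing batch when the data size divides evenly:

import numpy as np

def batch_iter(data, batch_size, num_epochs):
    """Sketch: per-epoch shuffled batch generator."""
    data = np.array(data)
    data_size = len(data)
    num_batches_per_epoch = int((data_size - 1) / batch_size) + 1  # ceil division
    for _ in range(num_epochs):
        shuffled = data[np.random.permutation(data_size)]
        for batch_num in range(num_batches_per_epoch):
            start = batch_num * batch_size
            end = min(start + batch_size, data_size)
            yield shuffled[start:end]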
Load the pre-trained word2vec from file.
def load_pretrained_word2vec(infile):
"""Load the pre-trained word2vec from file."""
if isinstance(infile, str):
infile = open(infile)
word2vec_list = {}
for idx, line in enumerate(infile):
if idx == 0:
vocab_size, dim = line.strip()... |
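The parse is cut off at the header line. Text-format word2vec files start with a "vocab_size dim" line followed by one "word v1 ... v_dim" line per word; a sketch of the rest, keeping the original names:

def load_pretrained_word2vec(infile):
    """Sketch: parse a text-format word2vec file into a dict of word -> vector."""
    if isinstance(infile, str):
        infile = open(infile)
    word2vec_list = {}
    for idx, line in enumerate(infile):
        if idx == 0:
            vocab_size, dim = map(int, line.strip().split())  # header, informational
            continue
        parts = line.rstrip().split(' ')
        word, vec = parts[0], [float(x) for x in parts[1:]]
        word2vec_list[word] = vec
    return word2vec_list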
return batch
def generate_batch(im_tensor, im_info):
"""return batch"""
data = [im_tensor, im_info]
data_shapes = [('data', im_tensor.shape), ('im_info', im_info.shape)]
data_batch = mx.io.DataBatch(data=data, label=None, provide_data=data_shapes, provide_label=None)
return data_batch |
VGG 16 layers network
This is a modified version, with fc6/fc7 layers replaced by conv layers,
and the network is slightly smaller than the original VGG 16 network
def get_symbol(num_classes=1000, **kwargs):
"""
VGG 16 layers network
This is a modified version, with fc6/fc7 layers replaced by conv layer... |
Get multi-layer perceptron
def get_mlp():
"""Get multi-layer perceptron"""
data = mx.symbol.Variable('data')
fc1 = mx.symbol.CaffeOp(data_0=data, num_weight=2, name='fc1',
prototxt="layer{type:\"InnerProduct\" inner_product_param{num_output: 128} }")
act1 = mx.symbol.CaffeOp... |
LeCun, Yann, Leon Bottou, Yoshua Bengio, and Patrick
Haffner. "Gradient-based learning applied to document recognition."
Proceedings of the IEEE (1998)
def get_lenet():
"""LeCun, Yann, Leon Bottou, Yoshua Bengio, and Patrick
Haffner. "Gradient-based learning applied to document recognition."
Procee... |
Parse the arguments
def parse_args():
"""Parse the arguments"""
parser = argparse.ArgumentParser(description='train an image classifier on mnist')
parser.add_argument('--network', type=str, default='lenet',
help='the cnn to use (mlp | lenet | <path to network json file>)')
parser... |
Implements forward computation.
is_train : bool, whether forwarding for training or testing.
req : list of {'null', 'write', 'inplace', 'add'}, how to assign to out_data. 'null' means skip assignment, etc.
in_data : list of NDArray, input data.
out_data : list of NDArray, pre-allocated ... |
Implements backward computation
req : list of {'null', 'write', 'inplace', 'add'}, how to assign to in_grad
out_grad : list of NDArray, gradient w.r.t. output data.
in_grad : list of NDArray, gradient w.r.t. input data. This is the output buffer.
def backward(self, req, out_grad, in_data, out_... |
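A minimal CustomOp filling in both contracts; self.assign applies the requested write mode ('null', 'write', 'inplace', 'add') for you. This sigmoid example follows the standard mx.operator API, though it is not the operator this row implements:

import mxnet as mx

class Sigmoid(mx.operator.CustomOp):
    """Sketch: element-wise sigmoid as a CustomOp."""
    def forward(self, is_train, req, in_data, out_data, aux):
        # out = 1 / (1 + exp(-x)), written to the pre-allocated output buffer
        y = 1.0 / (1.0 + mx.nd.exp(-in_data[0]))
        self.assign(out_data[0], req[0], y)

    def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
        # d(sigmoid)/dx = y * (1 - y), chained with the incoming gradient
        y = out_data[0]
        self.assign(in_grad[0], req[0], out_grad[0] * y * (1.0 - y))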
Internal utility function to reset binding.
def _reset_bind(self):
"""Internal utility function to reset binding."""
self.binded = False
self._buckets = {}
self._curr_module = None
self._curr_bucket_key = None |
A list of names for data required by this module.
def data_names(self):
"""A list of names for data required by this module."""
if self.binded:
return self._curr_module.data_names
else:
_, data_names, _ = self._call_sym_gen(self._default_bucket_key)
return da... |
A list of names for the outputs of this module.
def output_names(self):
"""A list of names for the outputs of this module."""
if self.binded:
return self._curr_module.output_names
else:
symbol, _, _ = self._call_sym_gen(self._default_bucket_key)
return symbol... |
Gets current parameters.
Returns
-------
`(arg_params, aux_params)`
A pair of dictionaries each mapping parameter names to NDArray values.
def get_params(self):
"""Gets current parameters.
Returns
-------
`(arg_params, aux_params)`
A pai... |
Initializes parameters.
Parameters
----------
initializer : Initializer
arg_params : dict
Defaults to ``None``. Existing parameters. This has higher priority
than `initializer`.
aux_params : dict
Defaults to ``None``. Existing auxiliary states... |
Gets states from all devices.
Parameters
----------
merge_multi_context : bool
Default is `True`. In the case when data-parallelism is used, the states
will be collected from multiple devices. A `True` value indicates that we
should merge the collected results... |
Sets value for states. Only one of states & values can be specified.
Parameters
----------
states : list of list of NDArrays
Source states arrays formatted like ``[[state1_dev1, state1_dev2],
[state2_dev1, state2_dev2]]``.
value : number
A single scal... |
Binding for a `BucketingModule` means setting up the buckets and binding the
executor for the default bucket key. Executors corresponding to other keys are
bound afterwards with `switch_bucket`.
Parameters
----------
data_shapes : list of (str, tuple)
This should cor... |
Switches to a different bucket. This will change ``self.curr_module``.
Parameters
----------
bucket_key : str (or any python object)
The key of the target bucket.
data_shapes : list of (str, tuple)
Typically ``data_batch.provide_data``.
label_shapes : lis... |
Installs and initializes optimizers.
Parameters
----------
kvstore : str or KVStore
Defaults to `'local'`.
optimizer : str or Optimizer
Defaults to `'sgd'`
optimizer_params : dict
Defaults to `(('learning_rate', 0.01),)`. The default value is ... |
Prepares the module for processing a data batch.
Usually involves switching bucket and reshaping.
For modules that contain `row_sparse` parameters in KVStore,
it prepares the `row_sparse` parameters based on the sparse_row_id_fn.
Parameters
----------
data_batch : DataB... |
Forward computation.
Parameters
----------
data_batch : DataBatch
is_train : bool
Defaults to ``None``, in which case `is_train` is taken as ``self.for_training``.
def forward(self, data_batch, is_train=None):
"""Forward computation.
Parameters
-----... |
Backward computation.
def backward(self, out_grads=None):
"""Backward computation."""
assert self.binded and self.params_initialized
self._curr_module.backward(out_grads=out_grads) |
Updates parameters according to installed optimizer and the gradient computed
in the previous forward-backward cycle.
When KVStore is used to update parameters for multi-device or multi-machine training,
a copy of the parameters is stored in KVStore. Note that for `row_sparse` parameters,
... |
Gets outputs from a previous forward computation.
Parameters
----------
merge_multi_context : bool
Defaults to ``True``. In the case when data-parallelism is used, the outputs
will be collected from multiple devices. A ``True`` value indicates that we
should m... |
Gets the gradients with respect to the inputs of the module.
Parameters
----------
merge_multi_context : bool
Defaults to ``True``. In the case when data-parallelism is used, the outputs
will be collected from multiple devices. A ``True`` value indicates that we
... |
Evaluates and accumulates evaluation metric on outputs of the last forward computation.
Parameters
----------
eval_metric : EvalMetric
labels : list of NDArray
Typically ``data_batch.label``.
def update_metric(self, eval_metric, labels, pre_sliced=False):
"""Evaluat... |
Installs monitor on all executors
def install_monitor(self, mon):
"""Installs monitor on all executors """
assert self.binded
self._monitor = mon
for mod in self._buckets.values():
mod.install_monitor(mon) |
Set status to recording/not recording. When recording, graph will be constructed
for gradient computation.
Parameters
----------
is_recording: bool
Returns
-------
previous state before this set.
def set_recording(is_recording): #pylint: disable=redefined-outer-name
"""Set status to r... |
Set status to training/predicting. This affects ctx.is_train in operator
running context. For example, Dropout will drop inputs randomly when
train_mode=True while simply passing through if train_mode=False.
Parameters
----------
train_mode: bool
Returns
-------
previous state before t... |