Initialize all entries given an annotation json file. Parameters ---------- anno_file: str annotation json file shuffle: bool whether to shuffle image list def _load_all(self, anno_file, shuffle): """ Initialize all entries given an annotation json fil...
Initializes the parameters and auxiliary states. def init_params(self, initializer=mx.init.Uniform(0.01), **kwargs): """Initializes the parameters and auxiliary states. """ self._module.init_params(initializer=initializer, **kwargs)
Forward computation. States from previous forward computation are carried to the current iteration if `carry_state` is set to `True`. def forward(self, data_batch, is_train=None, carry_state=True): """Forward computation. States from previous forward computation are carried to the current itera...
Updates parameters according to the installed optimizer and the gradients computed in the previous forward-backward batch. Gradients are clipped by their global norm if `max_norm` is set. Parameters ---------- max_norm: float, optional If set, clip values of all grad...
Clips gradient norm. The norm is computed over all gradients together, as if they were concatenated into a single vector. Gradients are modified in-place. The method was first introduced in `[ICML2013] On the difficulty of training recurrent neural networks` Parameters ------...
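A minimal sketch of the global-norm clipping described above, using the standalone gluon helper `mx.gluon.utils.clip_global_norm` rather than the module method itself (values are illustrative):

import mxnet as mx

# two gradient arrays treated as one concatenated vector
grads = [mx.nd.ones((2, 3)) * 10, mx.nd.ones((5,)) * 10]
total_norm = mx.gluon.utils.clip_global_norm(grads, max_norm=1.0)
# each array is rescaled in place so the global norm is at most 1.0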
Image visualization and preservation :param title: title :param X: images to be visualized :param name: saved picture's name :return: def visual(title, X, name): """Image visualization and preservation :param title: title :param X: images to be visualized :param name: saved picture's name ...
Transform the images: resize to 64x64, transpose to channel-first, and normalize to [-1, 1] def transformer(data, label): """Transform the images: resize to 64x64, transpose to channel-first, and normalize to [-1, 1]""" # resize to 64x64 data = mx.image.imresize(data, 64, 64) # transpose from (64, 64, 3) to (3, 64, 64) data = mx.nd.transpose(data, (2, 0, 1)) # normalize to [-1, 1] data = data.astype(np.float32)/...
Load the dataset and split it into train/valid data :param dataset_name: string Returns: train_data: int array training dataset val_data: int array valid dataset def get_dataset(dataset_name): """Load the dataset and split it into train/valid data :param dataset_name: string ...
Get net G def get_netG(): """Get net G""" # build the generator netG = nn.Sequential() with netG.name_scope(): # input is Z, going into a convolution netG.add(nn.Conv2DTranspose(ngf * 8, 4, 1, 0, use_bias=False)) netG.add(nn.BatchNorm()) netG.add(nn.Activation('relu')) ...
Get the netD def get_netD(): """Get the netD""" # build the discriminator netD = nn.Sequential() with netD.name_scope(): # input is (nc) x 64 x 64 netD.add(nn.Conv2D(ndf, 4, 2, 1, use_bias=False)) netD.add(nn.LeakyReLU(0.2)) # state size. (ndf) x 32 x 32 netD.add...
Get configurations for net def get_configurations(netG, netD): """Get configurations for net""" # loss loss = gluon.loss.SoftmaxCrossEntropyLoss() # initialize the generator and the discriminator netG.initialize(mx.init.Normal(0.02), ctx=ctx) netD.initialize(mx.init.Normal(0.02), ctx=ctx) ...
Entry point to dcgan def main(): """Entry point to dcgan""" # to get the dataset and net configuration train_data, val_data = get_dataset(dataset) netG = get_netG() netD = get_netD() loss, trainerG, trainerD = get_configurations(netG, netD) # set ...
Gets a customized logger. .. note:: `getLogger` is deprecated. Use `get_logger` instead. def getLogger(name=None, filename=None, filemode=None, level=WARNING): """Gets a customized logger. .. note:: `getLogger` is deprecated. Use `get_logger` instead. """ warnings.warn("getLogger is deprecated, ...
Gets a customized logger. Parameters ---------- name: str, optional Name of the logger. filename: str, optional The filename to which the logger's output will be sent. filemode: str, optional The file mode to open the file (corresponding to `filename`), default is 'a...
data preparation def transformer(data, label): """ data preparation """ data = mx.image.imresize(data, IMAGE_SIZE, IMAGE_SIZE) data = mx.nd.transpose(data, (2, 0, 1)) data = data.astype(np.float32) / 128.0 - 1 return data, label
helper function to get dataloader def get_training_data(batch_size): """ helper function to get dataloader""" return gluon.data.DataLoader( CIFAR10(train=True, transform=transformer), batch_size=batch_size, shuffle=True, last_batch='discard')
r"""ResNet V1 model from `"Deep Residual Learning for Image Recognition" <http://arxiv.org/abs/1512.03385>`_ paper. ResNet V2 model from `"Identity Mappings in Deep Residual Networks" <https://arxiv.org/abs/1603.05027>`_ paper. Parameters ---------- version : int Version of ResNet. Opti...
Helper function for random generators. def _random_helper(random, sampler, params, shape, dtype, kwargs): """Helper function for random generators.""" if isinstance(params[0], Symbol): for i in params[1:]: assert isinstance(i, Symbol), \ "Distribution parameters must all hav...
Draw random samples from a Poisson distribution. Samples are distributed according to a Poisson distribution parametrized by *lambda* (rate). Samples will always be returned as a floating point data type. Parameters ---------- lam : float or Symbol, optional Expectation of interval, should...
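A short usage sketch with the NDArray counterpart of this operator (shapes and rate are illustrative):

import mxnet as mx

samples = mx.nd.random.poisson(lam=4, shape=(2, 3))
# samples are float32; the mean of many draws approaches lam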
Draw random samples from a generalized negative binomial distribution. Samples are distributed according to a generalized negative binomial distribution parametrized by *mu* (mean) and *alpha* (dispersion). *alpha* is defined as *1/k* where *k* is the failure limit of the number of unsuccessful experim...
Concurrent sampling from multiple multinomial distributions. .. note:: The input distribution must be normalized, i.e. `data` must sum to 1 along its last dimension. Parameters ---------- data : Symbol An *n* dimensional array whose last dimension has length `k`, where `k...
Single-shot multi-box detection with VGG 16 layers ConvNet. This is a modified version, with fc6/fc7 layers replaced by conv layers, and the network is slightly smaller than the original VGG 16 network. This is a training network with losses. Parameters ---------- num_classes: int number of ob...
Single-shot multi-box detection with VGG 16 layers ConvNet. This is a modified version, with fc6/fc7 layers replaced by conv layers, and the network is slightly smaller than the original VGG 16 network. This is the detection network. Parameters ---------- num_classes: int number of object clas...
Creates a model from previously saved checkpoint. Parameters ---------- prefix : str path prefix of saved model files. You should have "prefix-symbol.json", "prefix-xxxx.params", and optionally "prefix-xxxx.states", where xxxx is the epoch number....
Saves current progress to checkpoint. Use `mx.callback.module_checkpoint` as `epoch_end_callback` to save during training. Parameters ---------- prefix : str The file prefix to checkpoint to. epoch : int The current epoch number. save_optimizer_st...
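A minimal round-trip sketch of the two checkpoint APIs above; 'mymodel' is a hypothetical prefix and the tiny symbol is only for illustration:

import mxnet as mx

net = mx.sym.FullyConnected(mx.sym.Variable('data'), num_hidden=2)
mod = mx.mod.Module(net, data_names=['data'], label_names=None)
mod.bind(data_shapes=[('data', (1, 4))], for_training=False)
mod.init_params()
mod.save_checkpoint('mymodel', epoch=0)   # writes mymodel-symbol.json and mymodel-0000.params
mod2 = mx.mod.Module.load('mymodel', 0)   # module recreated from the checkpoint files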
Internal function to reset the bound state. def _reset_bind(self): """Internal function to reset the bound state.""" self.binded = False self._exec_group = None self._data_shapes = None self._label_shapes = None
Gets current parameters. Returns ------- `(arg_params, aux_params)` A pair of dictionaries each mapping parameter names to NDArray values. def get_params(self): """Gets current parameters. Returns ------- `(arg_params, aux_params)` A pai...
Initializes the parameters and auxiliary states. Parameters ---------- initializer : Initializer Called to initialize parameters if needed. arg_params : dict If not ``None``, should be a dictionary of existing arg_params. Initialization will be copied...
Assigns parameter and aux state values. Parameters ---------- arg_params : dict Dictionary of name to `NDArray`. aux_params : dict Dictionary of name to `NDArray`. allow_missing : bool If ``True``, params could contain missing values, and the ...
Binds the symbols to construct executors. This is necessary before one can perform computation with the module. Parameters ---------- data_shapes : list of (str, tuple) Typically is ``data_iter.provide_data``. label_shapes : list of (str, tuple) Typically...
Reshapes the module for new input shapes. Parameters ---------- data_shapes : list of (str, tuple) Typically is ``data_iter.provide_data``. label_shapes : list of (str, tuple) Typically is ``data_iter.provide_label``. def reshape(self, data_shapes, label_shapes=...
Installs and initializes optimizers. Parameters ---------- kvstore : str or KVStore Default `'local'`. optimizer : str or Optimizer Default `'sgd'` optimizer_params : dict Default `(('learning_rate', 0.01),)`. The default value is not a dictio...
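A minimal end-to-end sketch of the bind / init_params / init_optimizer sequence described above; the symbol, shapes, and hyperparameters are illustrative assumptions:

import mxnet as mx

net = mx.sym.FullyConnected(mx.sym.Variable('data'), num_hidden=10)
mod = mx.mod.Module(net, data_names=['data'], label_names=None)
mod.bind(data_shapes=[('data', (32, 100))], for_training=True)
mod.init_params(initializer=mx.init.Uniform(0.01))
mod.init_optimizer(kvstore='local', optimizer='sgd',
                   optimizer_params=(('learning_rate', 0.01),))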
Borrows optimizer from a shared module. Used in bucketing, where exactly the same optimizer (esp. kvstore) is used. Parameters ---------- shared_module : Module def borrow_optimizer(self, shared_module): """Borrows optimizer from a shared module. Used in bucketing, where exactl...
Forward computation. It supports data batches with different shapes, such as different batch sizes or different image sizes. If reshaping of data batch relates to modification of symbol or module, such as changing image layout ordering or switching from training to predicting, module reb...
Backward computation. See Also ---------- :meth:`BaseModule.backward`. Parameters ---------- out_grads : NDArray or list of NDArray, optional Gradient on the outputs to be propagated back. This parameter is only needed when bind is called ...
Updates parameters according to the installed optimizer and the gradients computed in the previous forward-backward batch. When KVStore is used to update parameters for multi-device or multi-machine training, a copy of the parameters is stored in KVStore. Note that for `row_sparse` parameters,...
Gets outputs of the previous forward computation. If ``merge_multi_context`` is ``True``, it is like ``[out1, out2]``. Otherwise, it is like ``[[out1_dev1, out1_dev2], [out2_dev1, out2_dev2]]``. All the output elements are `NDArray`. When `merge_multi_context` is `False`, those `NDArray` ...
Gets the gradients with respect to the inputs of the module. If ``merge_multi_context`` is ``True``, it is like ``[grad1, grad2]``. Otherwise, it is like ``[[grad1_dev1, grad1_dev2], [grad2_dev1, grad2_dev2]]``. All the output elements are `NDArray`. Parameters ---------- ...
Gets states from all devices. If `merge_multi_context` is ``True``, it is like ``[out1, out2]``. Otherwise, it is like ``[[out1_dev1, out1_dev2], [out2_dev1, out2_dev2]]``. All the output elements are `NDArray`. Parameters ---------- merge_multi_context : bool ...
Evaluates and accumulates evaluation metric on outputs of the last forward computation. See Also ---------- :meth:`BaseModule.update_metric`. Parameters ---------- eval_metric : EvalMetric Evaluation metric to use. labels : list of NDArray if `pre_sl...
Synchronizes parameters from devices to CPU. This function should be called after calling `update`, which updates the parameters on the devices, before one can read the latest parameters from ``self._arg_params`` and ``self._aux_params``. For row_sparse parameters on devices, they are pulled from...
Saves optimizer (updater) state to a file. Parameters ---------- fname : str Path to output states file. def save_optimizer_states(self, fname): """Saves optimizer (updater) state to a file. Parameters ---------- fname : str Path to outp...
Loads optimizer (updater) state from a file. Parameters ---------- fname : str Path to input states file. def load_optimizer_states(self, fname): """Loads optimizer (updater) state from a file. Parameters ---------- fname : str Path to i...
Prepares the module for processing a data batch. Usually involves switching bucket and reshaping. For modules that contain `row_sparse` parameters in KVStore, it prepares the `row_sparse` parameters based on the sparse_row_id_fn. When KVStore is used to update parameters for multi-devi...
Helper function for random generators. def _random_helper(random, sampler, params, shape, dtype, ctx, out, kwargs): """Helper function for random generators.""" if isinstance(params[0], NDArray): for i in params[1:]: assert isinstance(i, NDArray), \ "Distribution parameters ...
Draw random samples from a uniform distribution. Samples are uniformly distributed over the half-open interval *[low, high)* (includes *low*, but excludes *high*). Parameters ---------- low : float or NDArray, optional Lower boundary of the output interval. All values generated will be ...
Draw random samples from a normal (Gaussian) distribution. Samples are distributed according to a normal distribution parametrized by *loc* (mean) and *scale* (standard deviation). Parameters ---------- loc : float or NDArray, optional Mean (centre) of the distribution. scale : float ...
Draw random samples from a normal (Gaussian) distribution. Samples are distributed according to a normal distribution parametrized by *loc* (mean) and *scale* (standard deviation). Parameters ---------- loc : float or NDArray Mean (centre) of the distribution. scale : float or NDArray...
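A usage sketch for the uniform and normal samplers above (NDArray API; bounds and shapes are illustrative):

import mxnet as mx

u = mx.nd.random.uniform(low=0, high=10, shape=(2, 2))   # samples in [0, 10)
n = mx.nd.random.normal(loc=0, scale=1, shape=(2, 2))    # standard normal samples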
r"""Draw samples from an exponential distribution. Its probability density function is .. math:: f(x; \frac{1}{\beta}) = \frac{1}{\beta} \exp(-\frac{x}{\beta}), for x > 0 and 0 elsewhere. \beta is the scale parameter, which is the inverse of the rate parameter \lambda = 1/\beta. Parameters -...
Draw random samples from a gamma distribution. Samples are distributed according to a gamma distribution parametrized by *alpha* (shape) and *beta* (scale). Parameters ---------- alpha : float or NDArray, optional The shape of the gamma distribution. Should be greater than zero. beta :...
Draw random samples from a negative binomial distribution. Samples are distributed according to a negative binomial distribution parametrized by *k* (limit of unsuccessful experiments) and *p* (failure probability in each experiment). Samples will always be returned as a floating point data type. ...
Concurrent sampling from multiple multinomial distributions. .. note:: The input distribution must be normalized, i.e. `data` must sum to 1 along its last dimension. Parameters ---------- data : NDArray An *n* dimensional array whose last dimension has length `k`, where `...
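A usage sketch for the NDArray version; note the normalization requirement on `data`:

import mxnet as mx

probs = mx.nd.array([0.1, 0.2, 0.3, 0.4])         # sums to 1 along the last axis
draws = mx.nd.random.multinomial(probs, shape=5)  # 5 indices drawn from [0, 3]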
Draw random samples from a discrete uniform distribution. Samples are uniformly distributed over the half-open interval *[low, high)* (includes *low*, but excludes *high*). Parameters ---------- low : int, required Lower boundary of the output interval. All values generated will be ...
Some tricks of feature engineering are adapted from tensorflow's wide and deep tutorial. def preprocess_uci_adult(data_name): """Some tricks of feature engineering are adapted from tensorflow's wide and deep tutorial. """ csv_columns = [ "age", "workclass", "fnlwgt", "education", "education...
Initialize parameters in the KVStore. Parameters with incomplete initialization are ignored. def _init_params(self): """Initialize parameters in the KVStore. Parameters with incomplete initialization are ignored. """ assert self._kv_initialized, "Cannot initialize parameters ...
Reset kvstore. def _reset_kvstore(self): """Reset kvstore.""" if self._kvstore and 'dist' in self._kvstore.type: raise RuntimeError("Cannot reset distributed KVStore.") self._kv_initialized = False self._kvstore = None self._distributed = None self._update_on...
Create kvstore. def _init_kvstore(self): """Create kvstore.""" config = self._kvstore_params # configure kvstore, update_on_kvstore and self._distributed in three cases: if self._contains_sparse_weight: # If weight is sparse, kvstore must be present and the weight must be up...
Sets a new learning rate of the optimizer. Parameters ---------- lr : float The new learning rate of the optimizer. def set_learning_rate(self, lr): """Sets a new learning rate of the optimizer. Parameters ---------- lr : float The new l...
Internal method to invoke pull operations on KVStore. If `full_idx` is set to True, `kv.pull` is preferred instead of `kv.row_sparse_pull`. def _row_sparse_pull(self, parameter, out, row_id, full_idx=False): """Internal method to invoke pull operations on KVStore. If `full_idx` is set to True, ...
Makes one step of parameter update. Should be called after `autograd.backward()` and outside of `record()` scope. For normal parameter updates, `step()` should be used, which internally calls `allreduce_grads()` and then `update()`. However, if you need to get the reduced gradients to p...
For each parameter, reduce the gradients from different contexts. Should be called after `autograd.backward()`, outside of `record()` scope, and before `trainer.update()`. For normal parameter updates, `step()` should be used, which internally calls `allreduce_grads()` and then `update...
Makes one step of parameter update. Should be called after `autograd.backward()` and outside of `record()` scope, and after `trainer.allreduce_grads()`. For normal parameter updates, `step()` should be used, which internally calls `allreduce_grads()` and then `update()`. However, if you need t...
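A minimal sketch of the allreduce_grads/update split described above, assuming a Gluon Trainer constructed with `update_on_kvstore=False` (supported in recent MXNet versions); the network and batch are illustrative:

import mxnet as mx
from mxnet import autograd, gluon

net = gluon.nn.Dense(1)
net.initialize()
trainer = gluon.Trainer(net.collect_params(), 'sgd',
                        {'learning_rate': 0.1}, update_on_kvstore=False)
x = mx.nd.ones((4, 2))
with autograd.record():
    loss = net(x).sum()
loss.backward()
trainer.allreduce_grads()   # reduce gradients across contexts
# ... gradients could be inspected or clipped here ...
trainer.update(4)           # apply the optimizer step with batch_size=4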
Saves trainer states (e.g. optimizer, momentum) to a file. Parameters ---------- fname : str Path to output states file. Note ---- `optimizer.param_dict`, which contains Parameter information (such as `lr_mult` and `wd_mult`) will not be saved. def...
Loads trainer states (e.g. optimizer, momentum) from a file. Parameters ---------- fname : str Path to input states file. Note ---- `optimizer.param_dict`, which contains Parameter information (such as `lr_mult` and `wd_mult`) will not be loaded from...
Sample 10 times with a size of 1000 to estimate the density of the sparse dataset def estimate_density(DATA_PATH, feature_size): """Sample 10 times with a size of 1000 to estimate the density of the sparse dataset""" if not os.path.exists(DATA_PATH): raise Exception("Data is not there!") density ...
Execute the command line command. def exec_cmd(cmd, role, taskid, pass_env): """Execute the command line command.""" if cmd[0].find('/') == -1 and os.path.exists(cmd[0]) and os.name != 'nt': cmd[0] = './' + cmd[0] cmd = ' '.join(cmd) env = os.environ.copy() for k, v in pass_env.items(): ...
Submit function of local jobs. def submit(args): """Submit function of local jobs.""" gpus = args.gpus.strip().split(',') def mthread_submit(nworker, nserver, envs): """ customized submit script that submits nslave jobs, each must contain args as parameter; note this can be a lambda ...
Iterates through p, identifying non-zero and non-repeating values, and returns them in a list Parameters ---------- p: list of int Returns ------- list of int def ctc_label(p): """Iterates through p, identifying non-zero and non-repeating values, and returns them in a l...
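A self-contained sketch consistent with the docstring above (the original body is truncated, so this is a reconstruction, not the exact source):

def ctc_label(p):
    """Keep values that are non-zero and differ from their predecessor."""
    ret = []
    prev = 0
    for c in p:
        if c != 0 and c != prev:
            ret.append(c)
        prev = c
    return ret

print(ctc_label([0, 1, 1, 0, 2, 2, 0, 3]))   # -> [1, 2, 3]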
Truncates the list of integers at the first zero and returns the preceding values as a new list def _remove_blank(l): """ Truncates the list of integers at the first zero and returns the preceding values as a new list""" ret = [] for i, _ in enumerate(l): if l[i] == 0: break ret.app...
Calculates the Longest Common Subsequence between p and l (both list of int) and returns its length def _lcs(p, l): """ Calculates the Longest Common Subsequence between p and l (both list of int) and returns its length""" # Dynamic Programming Finding LCS if len(p) == 0: return 0 ...
Simple accuracy measure: number of 100% accurate predictions divided by total number def accuracy(self, label, pred): """ Simple accuracy measure: number of 100% accurate predictions divided by total number """ hit = 0. total = 0. batch_size = label.shape[0] for i in range(batch...
Longest Common Subsequence accuracy measure: calculate accuracy of each prediction as LCS/length def accuracy_lcs(self, label, pred): """ Longest Common Subsequence accuracy measure: calculate accuracy of each prediction as LCS/length""" hit = 0. total = 0. batch_size = label.shape[0] ...
Not particularly fast code to parse the text file and load into NDArrays. Returns two data iters, one for train, the other for validation. def get_movielens_iter(filename, batch_size): """Not particularly fast code to parse the text file and load into NDArrays. Returns two data iters, one for train, the othe...
Decode image from str buffer. Wrapper for cv2.imdecode that uses mx.nd.NDArray Parameters ---------- str_img : str str buffer read from image file flag : int same as flag for cv2.imdecode Returns ------- img : NDArray decoded image in (width, height, channels) ...
Resize image to the target size. Wrapper for cv2.resize that uses mx.nd.NDArray Parameters ---------- src : NDArray image in (width, height, channels) size : tuple target size in (width, height) interpolation : int same as interpolation for cv2.resize Returns -...
Pad image border Wrapper for cv2.copyMakeBorder that uses mx.nd.NDArray Parameters ---------- src : NDArray Image in (width, height, channels). Other arguments are the same as cv2.copyMakeBorder Returns ------- img : NDArray padded image def copyMakeBorder(src, top, bot, ...
Crop src at fixed location, and (optionally) resize it to size def fixed_crop(src, x0, y0, w, h, size=None, interpolation=cv2.INTER_CUBIC): """Crop src at fixed location, and (optionally) resize it to size""" out = mx.nd.crop(src, begin=(y0, x0, 0), end=(y0+h, x0+w, int(src.shape[2]))) if size is not None ...
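Hypothetical usage of the fixed_crop helper above, assuming it is in scope; with size=None no resize happens, so only the crop path is exercised:

import mxnet as mx

src = mx.nd.zeros((256, 256, 3))                      # (height, width, channels)
patch = fixed_crop(src, x0=16, y0=16, w=224, h=224)   # 224x224 crop, no resize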
Randomly crop src with size. Upsample result if src is smaller than size def random_crop(src, size): """Randomly crop src with size. Upsample result if src is smaller than size""" h, w, _ = src.shape new_w, new_h = scale_down((w, h), size) x0 = random.randint(0, w - new_w) y0 = random.randint(0, h...
Randomly crop src with size. Randomize area and aspect ratio def random_size_crop(src, size, min_area=0.25, ratio=(3.0/4.0, 4.0/3.0)): """Randomly crop src with size. Randomize area and aspect ratio""" h, w, _ = src.shape area = w*h for _ in range(10): new_area = random.uniform(min_area, 1.0) *...
Move iterator position forward def next(self): """Move iterator position forward""" batch = mx.nd.zeros((self.batch_size, self.size[1], self.size[0], 3)) i = self.cur for i in range(self.cur, min(len(self.list), self.cur+self.batch_size)): str_img = open(self.root+self.list[...
Check to see if the two arrays are the same size. def check_label_shapes(labels, preds, shape=0): """Check to see if the two arrays are the same size.""" if shape == 0: label_shape, pred_shape = len(labels), len(preds) else: label_shape, pred_shape = labels.shape, preds.shape if label...
Imports the ONNX model files, passed as a parameter, into a Gluon SymbolBlock object. Parameters ---------- model_file : str ONNX model file name ctx : Context or list of Context Loads the model into one or many context(s). Returns ------- sym_block : :class:`~mxnet.gluon.Sym...
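A usage sketch, assuming `import_to_gluon` is available in mxnet.contrib.onnx (MXNet 1.3+); 'model.onnx' is a placeholder path and the input shape depends on the model:

import mxnet as mx
from mxnet.contrib import onnx as onnx_mxnet

net = onnx_mxnet.import_to_gluon('model.onnx', ctx=mx.cpu())
out = net(mx.nd.ones((1, 3, 224, 224)))   # illustrative input shape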
Model initialization. def get_model(model, ctx, opt): """Model initialization.""" kwargs = {'ctx': ctx, 'pretrained': opt.use_pretrained, 'classes': classes} if model.startswith('resnet'): kwargs['thumbnail'] = opt.use_thumbnail elif model.startswith('vgg'): kwargs['batch_norm'] = opt.b...
get dataset iterators def get_data_iters(dataset, batch_size, opt): """get dataset iterators""" if dataset == 'mnist': train_data, val_data = get_mnist_iterator(batch_size, (1, 28, 28), num_parts=kv.num_workers, part_index=kv.rank) elif dataset == '...
Set the learning rate to the initial value decayed by ratio every N epochs. def update_learning_rate(lr, trainer, epoch, ratio, steps): """Set the learning rate to the initial value decayed by ratio every N epochs.""" new_lr = lr * (ratio ** int(np.sum(np.array(steps) < epoch))) trainer.set_learning_rate(n...
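Worked values for the decay rule above, with illustrative settings lr=0.1, ratio=0.5, steps=[10, 20, 30]:

import numpy as np

lr, ratio, steps = 0.1, 0.5, [10, 20, 30]
for epoch in (5, 15, 25, 35):
    new_lr = lr * (ratio ** int(np.sum(np.array(steps) < epoch)))
    print(epoch, new_lr)   # 0.1, 0.05, 0.025, 0.0125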
Seeds the random number generators in MXNet. This affects the behavior of modules in MXNet that use random number generators, like the dropout operator and `NDArray`'s random sampling operators. Parameters ---------- seed_state : int The random number seed. ctx : Context The ...
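Seeding makes the samplers reproducible, as a quick check shows:

import mxnet as mx

mx.random.seed(42)
a = mx.nd.random.uniform(shape=(2,))
mx.random.seed(42)
b = mx.nd.random.uniform(shape=(2,))
# a and b contain identical draws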
Draw random samples from a uniform distribution. def random_uniform(attrs, inputs, proto_obj): """Draw random samples from a uniform distribution.""" try: from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE except ImportError: raise ImportError("Onnx and protobuf need to be installed. " ...
Draw random samples from a Gaussian distribution. def random_normal(attrs, inputs, proto_obj): """Draw random samples from a Gaussian distribution.""" try: from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE except ImportError: raise ImportError("Onnx and protobuf need to be installed. " ...
Adding two tensors def add(attrs, inputs, proto_obj): """Adding two tensors""" new_attr = {} if 'broadcast' in attrs and attrs['broadcast'] == 1: broadcast_axis = attrs['axis'] op_value = translation_utils._fix_broadcast('broadcast_add', inputs, ...
Mean of all the input tensors. def mean(attrs, inputs, proto_obj): """Mean of all the input tensors.""" concat_input = [symbol.expand_dims(op_input, axis=0) for op_input in inputs] concat_sym = symbol.concat(*concat_input, dim=0) mean_sym = symbol.mean(concat_sym, axis=0) return mean_sym, attrs, in...
Returns indices of the maximum values along an axis def argmax(attrs, inputs, proto_obj): """Returns indices of the maximum values along an axis""" axis = attrs.get('axis', 0) keepdims = attrs.get('keepdims', 1) argmax_op = symbol.argmax(inputs[0], axis=axis, keepdims=keepdims) # onnx argmax operat...
Returns indices of the minimum values along an axis. def argmin(attrs, inputs, proto_obj): """Returns indices of the minimum values along an axis.""" axis = attrs.get('axis', 0) keepdims = attrs.get('keepdims', 1) argmin_op = symbol.argmin(inputs[0], axis=axis, keepdims=keepdims) # onnx argmax oper...
Elementwise maximum of arrays. MXNet maximum compares only two symbols at a time. ONNX can send more than two to compare. Breaking into multiple mxnet ops to compare two symbols at a time def maximum(attrs, inputs, proto_obj): """ Elementwise maximum of arrays. MXNet maximum compares only two s...
Elementwise minimum of arrays. def minimum(attrs, inputs, proto_obj): """Elementwise minimum of arrays.""" # MXNet minimum compares only two symbols at a time. # ONNX can send more than two to compare. # Breaking into multiple mxnet ops to compare two symbols at a time if len(inputs) > 1: m...
Joins input arrays along a given axis. def concat(attrs, inputs, proto_obj): """ Joins input arrays along a given axis. """ new_attrs = translation_utils._fix_attribute_names(attrs, {'axis': 'dim'}) return 'concat', new_attrs, inputs
Add padding to input tensor def pad(attrs, inputs, proto_obj): """ Add padding to input tensor""" new_attrs = translation_utils._fix_attribute_names(attrs, {'pads' : 'pad_width', 'value' : 'constant_value' ...
Batch normalization. def batch_norm(attrs, inputs, proto_obj): """Batch normalization.""" new_attrs = translation_utils._fix_attribute_names(attrs, {'epsilon': 'eps', 'is_test': 'fix_gamma'}) new_attrs = translation_utils._remove_attributes(new...
Instance Normalization. def instance_norm(attrs, inputs, proto_obj): """Instance Normalization.""" new_attrs = translation_utils._fix_attribute_names(attrs, {'epsilon' : 'eps'}) new_attrs['eps'] = attrs.get('epsilon', 1e-5) return 'InstanceNorm', new_attrs, inputs
Leaky ReLU function def leaky_relu(attrs, inputs, proto_obj): """Leaky ReLU function""" if 'alpha' in attrs: new_attrs = translation_utils._fix_attribute_names(attrs, {'alpha' : 'slope'}) else: new_attrs = translation_utils._add_extra_attributes(attrs, {'slope': 0.01}) return 'LeakyReLU...
ELU function def _elu(attrs, inputs, proto_obj): """ELU function""" if 'alpha' in attrs: new_attrs = translation_utils._fix_attribute_names(attrs, {'alpha' : 'slope'}) else: new_attrs = translation_utils._add_extra_attributes(attrs, {'slope': 1.0}) new_attrs = translation_utils._add_ext...