text stringlengths 81 112k |
|---|
PRelu function
def _prelu(attrs, inputs, proto_obj):
    """PRelu operator: translated to MXNet's LeakyReLU with act_type='prelu'."""
    # MXNet implements PRelu as a mode of LeakyReLU.
    prelu_attrs = translation_utils._add_extra_attributes(attrs, {'act_type': 'prelu'})
    return 'LeakyReLU', prelu_attrs, inputs
Selu function
def _selu(attrs, inputs, proto_obj):
    """Selu operator: translated to MXNet's LeakyReLU with act_type='selu'."""
    # MXNet implements SELU as a mode of LeakyReLU.
    selu_attrs = translation_utils._add_extra_attributes(attrs, {'act_type': 'selu'})
    return 'LeakyReLU', selu_attrs, inputs
Softmax function.
def softmax(attrs, inputs, proto_obj):
    """Softmax operator; the reduction axis defaults to 1 when unspecified."""
    updated_attrs = attrs
    if 'axis' not in updated_attrs:
        # ONNX Softmax defaults to axis=1; make it explicit for MXNet.
        updated_attrs = translation_utils._add_extra_attributes(updated_attrs, {'axis': 1})
    return 'softmax', updated_attrs, inputs
Applies the softplus activation function element-wise to the input.
def softplus(attrs, inputs, proto_obj):
    """Applies the softplus activation function element-wise to the input.

    Translated to MXNet's 'Activation' operator with act_type='softrelu',
    which is MXNet's name for softplus: log(1 + exp(x)).
    """
    # Fixed docstring typo: "sofplus" -> "softplus".
    new_attrs = translation_utils._add_extra_attributes(attrs, {'act_type' : 'softrelu'})
    return 'Activation', new_attrs, inputs
Compute N-D convolution on (N+2)-D input.
def conv(attrs, inputs, proto_obj):
"""Compute N-D convolution on (N+2)-D input."""
new_attrs = translation_utils._fix_attribute_names(attrs, {'kernel_shape' : 'kernel',
'strides' : 'stride',
... |
Computes transposed convolution of the input tensor.
def deconv(attrs, inputs, proto_obj):
"""Computes transposed convolution of the input tensor."""
new_attrs = translation_utils._fix_attribute_names(attrs, {'kernel_shape' : 'kernel',
'strides' : ... |
Applies a linear transformation: Y=XWT+b.
def fully_connected(attrs, inputs, proto_obj):
"""Applies a linear transformation: Y=XWT+b."""
new_attrs = translation_utils._remove_attributes(attrs, ['axis'])
new_attrs = translation_utils._fix_bias('FullyConnected', new_attrs, len(inputs))
new_attrs = tran... |
Performs max pooling on the input.
def global_maxpooling(attrs, inputs, proto_obj):
"""Performs max pooling on the input."""
new_attrs = translation_utils._add_extra_attributes(attrs, {'global_pool': True,
'kernel': (1, 1),
... |
Performs avg pooling on the input.
def global_avgpooling(attrs, inputs, proto_obj):
"""Performs avg pooling on the input."""
new_attrs = translation_utils._add_extra_attributes(attrs, {'global_pool': True,
'kernel': (1, 1),
... |
Performs global lp pooling on the input.
def global_lppooling(attrs, inputs, proto_obj):
"""Performs global lp pooling on the input."""
p_value = attrs.get('p', 2)
new_attrs = translation_utils._add_extra_attributes(attrs, {'global_pool': True,
... |
Performs general matrix multiplication and accumulation
def linalg_gemm(attrs, inputs, proto_obj):
"""Performs general matrix multiplication and accumulation"""
trans_a = 0
trans_b = 0
alpha = 1
beta = 1
if 'transA' in attrs:
trans_a = attrs['transA']
if 'transB' in attrs:
t... |
Local Response Normalization.
def local_response_norm(attrs, inputs, proto_obj):
"""Local Response Normalization."""
new_attrs = translation_utils._fix_attribute_names(attrs,
{'bias': 'knorm',
'size' ... |
Dropout Regularization.
def dropout(attrs, inputs, proto_obj):
"""Dropout Regularization."""
mode = 'training'
if 'is_test' in attrs and attrs['is_test'] == 0:
mode = 'always'
new_attrs = translation_utils._fix_attribute_names(attrs,
{'rati... |
Reshape the given array by the shape attribute.
def reshape(attrs, inputs, proto_obj):
"""Reshape the given array by the shape attribute."""
if len(inputs) == 1:
return 'reshape', attrs, inputs[0]
reshape_shape = list(proto_obj._params[inputs[1].name].asnumpy())
reshape_shape = [int(i) for i in... |
Cast input to a given dtype
def cast(attrs, inputs, proto_obj):
""" Cast input to a given dtype"""
try:
from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE
except ImportError:
raise ImportError("Onnx and protobuf need to be installed. "
+ "Instructions to install - htt... |
Splits an array along a particular axis into multiple sub-arrays.
def split(attrs, inputs, proto_obj):
"""Splits an array along a particular axis into multiple sub-arrays."""
split_list = attrs.get('split') if 'split' in attrs else []
new_attrs = translation_utils._fix_attribute_names(attrs,
... |
Returns a slice of the input tensor along multiple axes.
def _slice(attrs, inputs, proto_obj):
"""Returns a slice of the input tensor along multiple axes."""
new_attrs = translation_utils._fix_attribute_names(attrs,
{'axes' : 'axis',
... |
Transpose the input array.
def transpose(attrs, inputs, proto_obj):
    """Transpose the input array."""
    # ONNX names the permutation attribute 'perm'; MXNet expects 'axes'.
    renamed = translation_utils._fix_attribute_names(attrs, {'perm': 'axes'})
    return 'transpose', renamed, inputs
Remove single-dimensional entries from the shape of a tensor.
def squeeze(attrs, inputs, proto_obj):
"""Remove single-dimensional entries from the shape of a tensor."""
new_attrs = translation_utils._fix_attribute_names(attrs,
{'axes' : 'axis'})
return... |
Inserts a new axis of size 1 into the array shape
def unsqueeze(attrs, inputs, cls):
"""Inserts a new axis of size 1 into the array shape"""
# MXNet can only add one axis at a time.
mxnet_op = inputs[0]
for axis in attrs["axes"]:
mxnet_op = symbol.expand_dims(mxnet_op, axis=axis)
return mx... |
Flattens the input array into a 2-D array by collapsing the higher dimensions.
def flatten(attrs, inputs, proto_obj):
"""Flattens the input array into a 2-D array by collapsing the higher dimensions."""
#Mxnet does not have axis support. By default uses axis=1
if 'axis' in attrs and attrs['axis'] != 1:
... |
Clips (limits) the values in an array.
def clip(attrs, inputs, proto_obj):
"""Clips (limits) the values in an array."""
new_attrs = translation_utils._fix_attribute_names(attrs, {'min' : 'a_min',
'max' : 'a_max'})
if 'a_max' not in new_attrs:
... |
Returns element-wise result of base element raised to powers from exp element.
def power(attrs, inputs, proto_obj):
"""Returns element-wise result of base element raised to powers from exp element."""
new_attrs = translation_utils._fix_attribute_names(attrs, {'exponent':'exp'})
if 'broadcast' in attrs:
... |
Reduce the array along a given axis by maximum value
def reduce_max(attrs, inputs, proto_obj):
    """Reduce the array along the given axes by taking the maximum value."""
    # Rename ONNX 'axes' to MXNet 'axis'.
    axis_attrs = translation_utils._fix_attribute_names(attrs, {'axes': 'axis'})
    return 'max', axis_attrs, inputs
Reduce the array along a given axis by mean value
def reduce_mean(attrs, inputs, proto_obj):
    """Reduce the array along the given axes by taking the mean value."""
    # Rename ONNX 'axes' to MXNet 'axis'.
    axis_attrs = translation_utils._fix_attribute_names(attrs, {'axes': 'axis'})
    return 'mean', axis_attrs, inputs
Reduce the array along a given axis by minimum value
def reduce_min(attrs, inputs, proto_obj):
    """Reduce the array along the given axes by taking the minimum value."""
    # Rename ONNX 'axes' to MXNet 'axis'.
    axis_attrs = translation_utils._fix_attribute_names(attrs, {'axes': 'axis'})
    return 'min', axis_attrs, inputs
Reduce the array along a given axis by sum value
def reduce_sum(attrs, inputs, proto_obj):
    """Reduce the array along the given axes by summation."""
    # Rename ONNX 'axes' to MXNet 'axis'.
    axis_attrs = translation_utils._fix_attribute_names(attrs, {'axes': 'axis'})
    return 'sum', axis_attrs, inputs
Reduce the array along a given axis by product value
def reduce_prod(attrs, inputs, proto_obj):
    """Reduce the array along the given axes by taking the product."""
    # Rename ONNX 'axes' to MXNet 'axis'.
    axis_attrs = translation_utils._fix_attribute_names(attrs, {'axes': 'axis'})
    return 'prod', axis_attrs, inputs
Reduce the array along a given axis by log sum value
def reduce_log_sum(attrs, inputs, proto_obj):
"""Reduce the array along a given axis by log sum value"""
keep_dims = True if 'keepdims' not in attrs else attrs.get('keepdims')
sum_op = symbol.sum(inputs[0], axis=attrs.get('axes'),
... |
Reduce the array along a given axis by log sum exp value
def reduce_log_sum_exp(attrs, inputs, proto_obj):
"""Reduce the array along a given axis by log sum exp value"""
keep_dims = True if 'keepdims' not in attrs else attrs.get('keepdims')
exp_op = symbol.exp(inputs[0])
sum_op = symbol.sum(exp_op, axi... |
Reduce the array along a given axis by sum square value
def reduce_sum_square(attrs, inputs, proto_obj):
"""Reduce the array along a given axis by sum square value"""
square_op = symbol.square(inputs[0])
sum_op = symbol.sum(square_op, axis=attrs.get('axes'),
keepdims=attrs.get('keep... |
Reduce input tensor by l1 normalization.
def reduce_l1(attrs, inputs, proto_obj):
"""Reduce input tensor by l1 normalization."""
new_attrs = translation_utils._fix_attribute_names(attrs, {'axes':'axis'})
new_attrs = translation_utils._add_extra_attributes(new_attrs,
... |
Reduce input tensor by l2 normalization.
def reduce_l2(attrs, inputs, proto_obj):
    """Reduce input tensor by l2 normalization."""
    # Rename ONNX 'axes' to MXNet 'axis'; 'norm' computes the L2 norm by default.
    return 'norm', translation_utils._fix_attribute_names(attrs, {'axes': 'axis'}), inputs
Average pooling
def avg_pooling(attrs, inputs, proto_obj):
""" Average pooling"""
new_attrs = translation_utils._fix_attribute_names(attrs,
{'kernel_shape': 'kernel',
'strides': 'stride',
... |
LP Pooling
def lp_pooling(attrs, inputs, proto_obj):
"""LP Pooling"""
p_value = attrs.get('p', 2)
new_attrs = translation_utils._fix_attribute_names(attrs,
{'kernel_shape': 'kernel',
'strides': 's... |
Max ROI Pooling.
def max_roi_pooling(attrs, inputs, proto_obj):
"""Max ROI Pooling."""
new_attrs = translation_utils._fix_attribute_names(attrs,
{'pooled_shape': 'pooled_size',
'spatial_scale': 'spati... |
Rearranges data from depth into blocks of spatial data.
def depthtospace(attrs, inputs, proto_obj):
    """Rearranges data from depth into blocks of spatial data."""
    # ONNX names the attribute 'blocksize'; MXNet expects 'block_size'.
    renamed = translation_utils._fix_attribute_names(attrs, {'blocksize': 'block_size'})
    return "depth_to_space", renamed, inputs
Rearranges blocks of spatial data into depth.
def spacetodepth(attrs, inputs, proto_obj):
    """Rearranges blocks of spatial data into depth."""
    # ONNX names the attribute 'blocksize'; MXNet expects 'block_size'.
    renamed = translation_utils._fix_attribute_names(attrs, {'blocksize': 'block_size'})
    return "space_to_depth", renamed, inputs
Returns batched one-hot vectors.
def hardmax(attrs, inputs, proto_obj):
"""Returns batched one-hot vectors."""
input_tensor_data = proto_obj.model_metadata.get('input_tensor_data')[0]
input_shape = input_tensor_data[1]
axis = int(attrs.get('axis', 1))
axis = axis if axis >= 0 else len(input_shape)... |
ONNX does not have eps attribute, so cannot map it to L2normalization in MXNet
without that, it works as norm operator discussion in PR:
https://github.com/onnx/onnx/pull/1330
def lpnormalization(attrs, inputs, proto_obj):
"""ONNX does not have eps attribute, so cannot map it to L2normalization in MXNet
... |
download mp4s
def download_mp4(from_idx, to_idx, _params):
"""
download mp4s
"""
succ = set()
fail = set()
for idx in range(from_idx, to_idx):
name = 's' + str(idx)
save_folder = '{src_path}/{nm}'.format(src_path=_params['src_path'], nm=name)
if idx == 0 or os.path.isdir... |
download aligns
def download_align(from_idx, to_idx, _params):
"""
download aligns
"""
succ = set()
fail = set()
for idx in range(from_idx, to_idx):
name = 's' + str(idx)
if idx == 0:
continue
script = "http://spandh.dcs.shef.ac.uk/gridcorpus/{nm}/align/{nm}.... |
Run unit tests in the emulator and copy the results back to the host through the mounted
volume in /mxnet
def run_ut_py3_qemu():
"""Run unit tests in the emulator and copy the results back to the host through the mounted
volume in /mxnet"""
from vmcontrol import VM
with VM() as vm:
qemu_pro... |
this runs inside the vm
def run_ut_python3_qemu_internal():
"""this runs inside the vm"""
pkg = glob.glob('mxnet_dist/*.whl')[0]
logging.info("=== NOW Running inside QEMU ===")
logging.info("PIP Installing %s", pkg)
check_call(['sudo', 'pip3', 'install', pkg])
logging.info("PIP Installing mxnet... |
Return subword-units presentation, given a word/token.
def _get_subword_units(token, gram):
"""Return subword-units presentation, given a word/token.
"""
if token == '</s>': # special token for padding purpose.
return [token]
t = '#' + token + '#'
return [t[i:i + gram] for i in range(0, le... |
Train the model using Caffe operator in MXNet
def fit(args, network, data_loader, eval_metrics=None, batch_end_callback=None):
"""Train the model using Caffe operator in MXNet"""
# kvstore
kv = mx.kvstore.create(args.kv_store)
# logging
head = '%(asctime)-15s Node[' + str(kv.rank) + '] %(message)s... |
Preprocess a 210x160x3 uint8 frame into a 6400 (80x80) (1 x input_size)
float vector.
def preprocess(self, img):
"""
Preprocess a 210x160x3 uint8 frame into a 6400 (80x80) (1 x input_size)
float vector.
"""
# Crop, down-sample, erase background and set foreground to 1.
... |
Returns a new empty handle.
Empty handle can be used to hold a result.
Returns
-------
handle
A new empty `NDArray` handle.
def _new_empty_handle():
"""Returns a new empty handle.
Empty handle can be used to hold a result.
Returns
-------
handle
A new empty `NDAr... |
Return a new handle with specified shape and context.
Empty handle is only used to hold results.
Returns
-------
handle
A new empty `NDArray` handle.
def _new_alloc_handle(shape, ctx, delay_alloc, dtype=mx_real_t):
"""Return a new handle with specified shape and context.
Empty handle... |
Returns a dispatch code for calling basic or advanced indexing functions.
def _get_indexing_dispatch_code(key):
"""Returns a dispatch code for calling basic or advanced indexing functions."""
if isinstance(key, (NDArray, np.ndarray)):
return _NDARRAY_ADVANCED_INDEXING
elif isinstance(key, list):
... |
Given start, stop, step and array length, return
absolute values of start, stop, and step for generating index range.
The returned values have been compensated by adding length if they
are less than zero for all the cases but slice(None, None, -1).
Note that the returned value of stop is not necessarily... |
Given data and index shapes, get the output `NDArray` shape.
This basically implements the infer shape logic of op gather_nd.
def _get_oshape_of_gather_nd_op(dshape, ishape):
"""Given data and index shapes, get the output `NDArray` shape.
This basically implements the infer shape logic of op gather_nd."""
... |
Given start, stop, and step, calculate the number of elements
of this slice.
def _get_dim_size(start, stop, step):
"""Given start, stop, and stop, calculate the number of elements
of this slice."""
assert step != 0
if step > 0:
assert start < stop
dim_size = (stop - start - 1) // st... |
Given two shapes that are not identical, find the shape
that both input shapes can broadcast to.
def _get_broadcast_shape(shape1, shape2):
"""Given two shapes that are not identical, find the shape
that both input shapes can broadcast to."""
if shape1 == shape2:
return shape1
length1 = len... |
Returns a new array filled with all ones, with the given shape and type.
Parameters
----------
shape : int or tuple of int or list of int
The shape of the empty array.
ctx : Context, optional
An optional device context.
Defaults to the current default context (``mxnet.context.cu... |
Returns a new array of given shape and type, filled with the given value `val`.
Parameters
--------
shape : int or tuple of int
The shape of the new array.
val : scalar
Fill value.
ctx : Context, optional
Device context (default is the current default context).
dtype : `... |
Creates an array from any object exposing the array interface.
Parameters
----------
source_array : array_like
An object exposing the array interface, an object whose `__array__`
method returns an array, or any (nested) sequence.
ctx : Context, optional
Device context (default i... |
Moves the `source` axis into the `destination` position
while leaving the other axes in their original order
Parameters
----------
tensor : mx.nd.array
The array which axes should be reordered
source : int or sequence of int
Original position of the axes to move. Can be negative but... |
Returns evenly spaced values within a given interval.
Values are generated within the half-open interval [`start`, `stop`). In other
words, the interval includes `start` but excludes `stop`. The function is
similar to the built-in Python function `range` and to `numpy.arange`,
but returns an `NDArray`.... |
Helper function for element-wise operation.
The function will perform numpy-like broadcasting if needed and call different functions.
Parameters
--------
lhs : NDArray or numeric value
Left-hand side operand.
rhs : NDArray or numeric value
Right-hand operand,
fn_array : functi... |
Returns element-wise sum of the input arrays with broadcasting.
Equivalent to ``lhs + rhs``, ``mx.nd.broadcast_add(lhs, rhs)`` and
``mx.nd.broadcast_plus(lhs, rhs)``.
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
then the arrays are br... |
Returns element-wise difference of the input arrays with broadcasting.
Equivalent to ``lhs - rhs``, ``mx.nd.broadcast_sub(lhs, rhs)`` and
``mx.nd.broadcast_minus(lhs, rhs)``.
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
then the array... |
Returns element-wise product of the input arrays with broadcasting.
Equivalent to ``lhs * rhs`` and ``mx.nd.broadcast_mul(lhs, rhs)``.
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
then the arrays are broadcastable to a common shape.
... |
Returns element-wise division of the input arrays with broadcasting.
Equivalent to ``lhs / rhs`` and ``mx.nd.broadcast_div(lhs, rhs)``.
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
then the arrays are broadcastable to a common shape.
... |
Returns element-wise modulo of the input arrays with broadcasting.
Equivalent to ``lhs % rhs`` and ``mx.nd.broadcast_mod(lhs, rhs)``.
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
then the arrays are broadcastable to a common shape.
P... |
Returns result of first array elements raised to powers from second array, element-wise
with broadcasting.
Equivalent to ``base ** exp`` and ``mx.nd.broadcast_power(lhs, rhs)``.
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
then the ar... |
Returns element-wise maximum of the input arrays with broadcasting.
Equivalent to ``mx.nd.broadcast_maximum(lhs, rhs)``.
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
then the arrays are broadcastable to a common shape.
Parameters
... |
Returns element-wise minimum of the input arrays with broadcasting.
Equivalent to ``mx.nd.broadcast_minimum(lhs, rhs)``.
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
then the arrays are broadcastable to a common shape.
Parameters
... |
Returns the result of element-wise **equal to** (==) comparison operation with
broadcasting.
For each element in input arrays, return 1(true) if corresponding elements are same,
otherwise return 0(false).
Equivalent to ``lhs == rhs`` and ``mx.nd.broadcast_equal(lhs, rhs)``.
.. note::
If t... |
Returns the result of element-wise **not equal to** (!=) comparison operation
with broadcasting.
For each element in input arrays, return 1(true) if corresponding elements are different,
otherwise return 0(false).
Equivalent to ``lhs != rhs`` and ``mx.nd.broadcast_not_equal(lhs, rhs)``.
.. note::... |
Returns the result of element-wise **greater than** (>) comparison operation
with broadcasting.
For each element in input arrays, return 1(true) if lhs elements are greater than rhs,
otherwise return 0(false).
Equivalent to ``lhs > rhs`` and ``mx.nd.broadcast_greater(lhs, rhs)``.
.. note::
... |
Returns the result of element-wise **greater than or equal to** (>=) comparison
operation with broadcasting.
For each element in input arrays, return 1(true) if lhs elements are greater than equal to rhs,
otherwise return 0(false).
Equivalent to ``lhs >= rhs`` and ``mx.nd.broadcast_greater_equal(lhs, ... |
Returns the result of element-wise **lesser than** (<) comparison operation
with broadcasting.
For each element in input arrays, return 1(true) if lhs elements are less than rhs,
otherwise return 0(false).
Equivalent to ``lhs < rhs`` and ``mx.nd.broadcast_lesser(lhs, rhs)``.
.. note::
If ... |
Returns the result of element-wise **lesser than or equal to** (<=) comparison
operation with broadcasting.
For each element in input arrays, return 1(true) if lhs elements are
lesser than equal to rhs, otherwise return 0(false).
Equivalent to ``lhs <= rhs`` and ``mx.nd.broadcast_lesser_equal(lhs, rhs... |
Returns the result of element-wise **logical and** comparison
operation with broadcasting.
For each element in input arrays, return 1(true) if lhs elements and rhs elements
are true, otherwise return 0(false).
Equivalent to ``lhs and rhs`` and ``mx.nd.broadcast_logical_and(lhs, rhs)``.
.. note::
... |
Returns the result of element-wise **logical or** comparison
operation with broadcasting.
For each element in input arrays, return 1(true) if lhs elements or rhs elements
are true, otherwise return 0(false).
Equivalent to ``lhs or rhs`` and ``mx.nd.broadcast_logical_or(lhs, rhs)``.
.. note::
... |
Returns the result of element-wise **logical xor** comparison
operation with broadcasting.
For each element in input arrays, return 1(true) if lhs elements or rhs elements
are true, otherwise return 0(false).
Equivalent to ``bool(lhs) ^ bool(rhs)`` and ``mx.nd.broadcast_logical_xor(lhs, rhs)``.
.... |
DEPRECATED, use ``concat`` instead
Parameters
----------
arrays : list of `NDArray`
Arrays to be concatenate. They must have identical shape except
the first dimension. They also must have the same data type.
axis : int
The axis along which to concatenate.
always_copy : bool... |
DEPRECATED, use mx.img instead
Parameters
----------
str_img : str
Binary image data
clip_rect : iterable of 4 int
Clip decoded image to rectangle (x0, y0, x1, y1).
out : NDArray
Output buffer. Can be 3 dimensional (c, h, w) or 4 dimensional (n, c, h, w).
index : int
... |
Returns a new array filled with all zeros, with the given shape and type.
Parameters
----------
shape : int or tuple of int
The shape of the empty array.
ctx : Context, optional
An optional device context (default is the current default context).
dtype : str or numpy.dtype, optional... |
Return a 2-D array with ones on the diagonal and zeros elsewhere.
Parameters
----------
N: int
Number of rows in the output.
M: int, optional
Number of columns in the output. If 0, defaults to N.
k: int, optional
Index of the diagonal: 0 (the default) refers to the main diag... |
Returns a new array of given shape and type, without initializing entries.
Parameters
----------
shape : int or tuple of int
The shape of the empty array.
ctx : Context, optional
An optional device context (default is the current default context).
dtype : str or numpy.dtype, optiona... |
Compute the histogram of the input data.
Parameters
----------
a : NDArray
Input data. The histogram is computed over the flattened array.
bins : int or sequence of scalars
If bins is an int, it defines the number of equal-width bins in the
given range (10, by default). If bins ... |
Split an array into multiple sub-arrays.
Parameters
----------
ary : NDArray
Array to be divided into sub-arrays.
indices_or_sections : int or tuple of ints
If `indices_or_sections` is an integer, N, the array will be divided
into N equal arrays along `axis`. If such a split is... |
Returns a reference view of NDArray that represents as DLManagedTensor until
all previous write operations on the current array are finished.
Parameters
----------
data: NDArray
input data.
Returns
-------
PyCapsule (the pointer of DLManagedTensor)
a reference view of ND... |
Returns a reference view of NDArray that represents as DLManagedTensor until
all previous read/write operations on the current array are finished.
Parameters
----------
data: NDArray
input data.
Returns
-------
PyCapsule (the pointer of DLManagedTensor)
a reference view ... |
Returns a NDArray backed by a dlpack tensor.
Parameters
----------
dlpack: PyCapsule (the pointer of DLManagedTensor)
input data
Returns
-------
NDArray
a NDArray backed by a dlpack tensor
Examples
--------
>>> x = mx.nd.ones((2,3))
>>> y = mx.nd.to_dlpack_for_... |
Returns an MXNet's NDArray backed by Numpy's ndarray.
Parameters
----------
ndarray: numpy.ndarray
input data
zero_copy: bool
Whether we use DLPack's zero-copy conversion to convert to MXNet's NDArray.
This is only available for c-contiguous arrays, i.e. array.flags[C_CONTIGUOU... |
Returns an index array for use in scatter_nd and gather_nd.
def _get_index_nd(self, key):
"""Returns an index array for use in scatter_nd and gather_nd."""
def _is_advanced_index(index):
"""The definition of advanced index here includes integers as well, while
integers are consi... |
Given value and vshape, create an `NDArray` from value with the same
context and dtype as the current one and broadcast it to vshape.
def _prepare_value_nd(self, value, vshape):
"""Given value and vshape, create an `NDArray` from value with the same
context and dtype as the current one and broa... |
This function is called by __setitem__ when key is a basic index, i.e.
an integer, or a slice, or a tuple of integers and slices. No restrictions
on the values of slices' steps.
def _set_nd_basic_indexing(self, key, value):
"""This function is called by __setitem__ when key is a basic index, i.... |
This function is called by __setitem__ when key is an advanced index.
def _set_nd_advanced_indexing(self, key, value):
"""This function is called by __setitem__ when key is an advanced index."""
indices = self._get_index_nd(key)
vshape = _get_oshape_of_gather_nd_op(self.shape, indices.shape)
... |
This function is called when key is a slice, or an integer,
or a tuple of slices or integers
def _get_nd_basic_indexing(self, key):
"""This function is called when key is a slice, or an integer,
or a tuple of slices or integers"""
shape = self.shape
if isinstance(key, integer_ty... |
Performs a synchronized copy from the `source_array` to the current array.
This is called through ``x[:] = source_array``, where the `source_array`
is a `numpy.ndarray` or array-like object.
This function blocks until all the pending read/write operations with respect
to the current `NDA... |
Returns a sliced NDArray that shares memory with the current one.
This is called through ``x[start:stop]``.
Parameters
----------
start : int
Starting inclusive index of slice in the first dim.
stop : int
Finishing exclusive index of slice in the first di... |
Returns a view of the array sliced at `idx` in the first dim.
This is called through ``x[idx]``.
Parameters
----------
idx : int
index for slicing the `NDArray` in the first dim.
Returns
-------
NDArray
`NDArray` sharing the memory with t... |
Returns a **view** of this array with a new shape without altering any data.
Parameters
----------
shape : tuple of int, or n ints
The new shape should not change the array size, namely
``np.prod(new_shape)`` should be equal to ``np.prod(self.shape)``.
Some d... |
Broadcasts the input array to a new shape.
Broadcasting is only allowed on axes with size 1. The new shape cannot change
the number of dimensions.
For example, you could broadcast from shape (2, 1) to (2, 3), but not from
shape (2, 3) to (2, 3, 3).
Parameters
----------... |
Tuple of array dimensions.
Examples
--------
>>> x = mx.nd.array([1, 2, 3, 4])
>>> x.shape
(4L,)
>>> y = mx.nd.zeros((2, 3, 4))
>>> y.shape
(2L, 3L, 4L)
def shape(self):
"""Tuple of array dimensions.
Examples
--------
>>>... |
Device context of the array.
Examples
--------
>>> x = mx.nd.array([1, 2, 3, 4])
>>> x.context
cpu(0)
>>> type(x.context)
<class 'mxnet.context.Context'>
>>> y = mx.nd.zeros((2,3), mx.gpu(0))
>>> y.context
gpu(0)
def context(self):
... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.