Create a dataset as quickly as possible.
Beware: the `variables` OrderedDict is modified INPLACE.
def _fast_dataset(
variables: 'OrderedDict[Any, Variable]',
coord_variables: Mapping[Any, Variable],
) -> 'Dataset':
"""Create a dataset as quickly as possible.
Beware: the `variables` OrderedDict is... |
Apply a variable level function over Dataset, dict of DataArray,
DataArray, Variable and/or ndarray objects.
def apply_dataset_vfunc(
func,
*args,
signature,
join='inner',
dataset_join='exact',
fill_value=_NO_FILL_VALUE,
exclude_dims=frozenset(),
keep_attrs=False
):
"""Apply a v... |
Iterate over selections of an xarray object in the provided order.
def _iter_over_selections(obj, dim, values):
"""Iterate over selections of an xarray object in the provided order."""
from .groupby import _dummy_copy
dummy = None
for value in values:
try:
obj_sel = obj.sel(**{dim:... |
Apply a dataset or dataarray level function over GroupBy, Dataset,
DataArray, Variable and/or ndarray objects.
def apply_groupby_func(func, *args):
"""Apply a dataset or dataarray level function over GroupBy, Dataset,
DataArray, Variable and/or ndarray objects.
"""
from .groupby import GroupBy, peek_... |
Apply an ndarray level function over Variable and/or ndarray objects.
def apply_variable_ufunc(
func,
*args,
signature,
exclude_dims=frozenset(),
dask='forbidden',
output_dtypes=None,
output_sizes=None,
keep_attrs=False
):
"""Apply an ndarray level function over Variable and/or ndarra... |
Apply an ndarray level function over ndarray objects.
def apply_array_ufunc(func, *args, dask='forbidden'):
"""Apply an ndarray level function over ndarray objects."""
if any(isinstance(arg, dask_array_type) for arg in args):
if dask == 'forbidden':
raise ValueError('apply_ufunc encountered a... |
Apply a vectorized function for unlabeled arrays on xarray objects.
The function will be mapped over the data variable(s) of the input
arguments using xarray's standard rules for labeled computation, including
alignment, broadcasting, looping over GroupBy/Dataset variables, and
merging of coordinates.
... |
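A minimal sketch of how this entry point is typically used (the variable names and factor below are invented for illustration): a plain ndarray-level function is wrapped so that it runs over DataArray inputs while dimensions and coordinates are carried through.

```python
import numpy as np
import xarray as xr

da = xr.DataArray(np.arange(6.0).reshape(2, 3), dims=("x", "y"))

def scale(values, factor):
    # operates on raw ndarrays; knows nothing about labels
    return values * factor

# apply_ufunc maps the ndarray function over the DataArray's data and
# reattaches dims/coords on the way out.
scaled = xr.apply_ufunc(scale, da, kwargs={"factor": 10.0})
print(scaled.dims)  # ('x', 'y')
```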
Generalized dot product for xarray objects. Like np.einsum, but
provides a simpler interface based on array dimensions.
Parameters
----------
arrays: DataArray (or Variable) objects
Arrays to compute.
dims: str or tuple of strings, optional
Which dimensions to sum over.
If n... |
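For illustration, a small sketch of the einsum-style interface described above (the array names and shapes are made up): the dimension named in `dims` is summed over.

```python
import numpy as np
import xarray as xr

a = xr.DataArray(np.random.rand(3, 4), dims=("x", "y"))
b = xr.DataArray(np.random.rand(4, 5), dims=("y", "z"))

# Contract over the shared 'y' dimension; the result has dims ('x', 'z').
c = xr.dot(a, b, dims="y")
print(c.dims)
```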
Return elements from `x` or `y` depending on `cond`.
Performs xarray-like broadcasting across input arguments.
Parameters
----------
cond : scalar, array, Variable, DataArray or Dataset with boolean dtype
When True, return values from `x`, otherwise returns values from `y`.
x, y : scalar, ... |
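A short, made-up example of the three-argument form: `cond` selects between `x` and `y` element-wise, with xarray-style broadcasting and alignment.

```python
import numpy as np
import xarray as xr

temp = xr.DataArray(np.array([[-5.0, 12.0], [3.0, -1.0]]), dims=("x", "y"))

# Keep positive values, replace everything else with 0.0.
clipped = xr.where(temp > 0, temp, 0.0)
```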
Create an equivalent signature string for a NumPy gufunc.
Unlike __str__, handles dimensions that don't map to Python
identifiers.
def to_gufunc_string(self):
"""Create an equivalent signature string for a NumPy gufunc.
Unlike __str__, handles dimensions that don't map to Python
... |
Return indices for an inverse permutation.
Parameters
----------
indices : 1D np.ndarray with dtype=int
Integer positions to assign elements to.
Returns
-------
inverse_permutation : 1D np.ndarray with dtype=int
Integer indices to take from the original array to create the
... |
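The idea can be sketched in plain NumPy (this mirrors the documented contract; the actual helper body may differ): scatter a counting range into the positions given by `indices`, so that taking the result from the original array undoes the permutation.

```python
import numpy as np

indices = np.array([2, 0, 1])            # element i is assigned to position indices[i]
inverse = np.empty(len(indices), dtype=np.intp)
inverse[indices] = np.arange(len(indices))
print(inverse)                            # [1 2 0]
```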
Given a non-empty list, does it consist of contiguous integers?
def _is_contiguous(positions):
"""Given a non-empty list, does it consist of contiguous integers?"""
previous = positions[0]
for current in positions[1:]:
if current != previous + 1:
return False
previous = current
... |

Indices of the advanced indexes subspaces for mixed indexing and vindex.
def _advanced_indexer_subspaces(key):
"""Indices of the advanced indexes subspaces for mixed indexing and vindex.
"""
if not isinstance(key, tuple):
key = (key,)
advanced_index_positions = [i for i, k in enumerate(key)
... |
Rolling window with padding.
def rolling_window(a, axis, window, center, fill_value):
""" Rolling window with padding. """
pads = [(0, 0) for s in a.shape]
if center:
start = int(window / 2) # 10 -> 5, 9 -> 4
end = window - 1 - start
pads[axis] = (start, end)
else:
pad... |
Make an ndarray with a rolling window along axis.
Parameters
----------
a : array_like
Array to add rolling window to
axis: int
axis position along which rolling window will be applied.
window : int
Size of rolling window
Returns
-------
Array that is a view of ... |
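As an aside, the "view with a rolling window along an axis" idea can be sketched with NumPy stride tricks (shown here only for a 1-D array, as an assumption about the approach): no data is copied, the trailing axis simply walks a window along the last axis.

```python
import numpy as np
from numpy.lib.stride_tricks import as_strided

a = np.arange(6)
window = 3
shape = (a.shape[-1] - window + 1, window)
strides = (a.strides[-1], a.strides[-1])
view = as_strided(a, shape=shape, strides=strides)
# view[i] == a[i:i + window]; e.g. view[0] is [0 1 2], view[3] is [3 4 5]
```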
Get a virtual variable (e.g., 'time.year' or a MultiIndex level)
from a dict of xarray.Variable objects (if possible)
def _get_virtual_variable(variables, key, level_vars=None, dim_sizes=None):
"""Get a virtual variable (e.g., 'time.year' or a MultiIndex level)
from a dict of xarray.Variable objects (if po... |
Calculate the dimensions corresponding to a set of variables.
Returns dictionary mapping from dimension names to sizes. Raises ValueError
if any of the dimension sizes conflict.
def calculate_dimensions(variables):
"""Calculate the dimensions corresponding to a set of variables.
Returns dictionary ma... |
Merge variables into multi-indexes.
Not public API. Used in Dataset and DataArray set_index
methods.
def merge_indexes(
indexes, # type: Dict[Any, Union[Any, List[Any]]]
variables, # type: Dict[Any, Variable]
coord_names, # type: Set
append=False, # type: bool
):
# type... |
Extract (multi-)indexes (levels) as variables.
Not public API. Used in Dataset and DataArray reset_index
methods.
def split_indexes(
dims_or_levels, # type: Union[Any, List[Any]]
variables, # type: OrderedDict[Any, Variable]
coord_names, # type: Set
level_coords, # type: Dict[Any, Any]
... |
Cast the given object to a Dataset.
Handles Datasets, DataArrays and dictionaries of variables. A new Dataset
object is only created if the provided object is not already one.
def as_dataset(obj):
"""Cast the given object to a Dataset.
Handles Datasets, DataArrays and dictionaries of variables. A new... |
Provide method for the key-autocompletions in IPython.
def _ipython_key_completions_(self):
"""Provide method for the key-autocompletions in IPython. """
return [key for key in self._dataset._ipython_key_completions_()
if key not in self._dataset._coord_names] |
Set the initial value of Dataset variables and dimensions
def _set_init_vars_and_dims(self, data_vars, coords, compat):
"""Set the initial value of Dataset variables and dimensions
"""
both_data_and_coords = [k for k in data_vars if k in coords]
if both_data_and_coords:
rais... |
Create a new dataset from the contents of a backends.*DataStore
object
def load_store(cls, store, decoder=None):
"""Create a new dataset from the contents of a backends.*DataStore
object
"""
variables, attributes = store.load()
if decoder:
variables, attribut... |
Dictionary of global attributes on this dataset
def attrs(self) -> Mapping:
"""Dictionary of global attributes on this dataset
"""
if self._attrs is None:
self._attrs = OrderedDict()
return self._attrs |
Manually trigger loading of this dataset's data from disk or a
remote source into memory and return this dataset.
Normally, it should not be necessary to call this method in user code,
because all xarray functions should either work on deferred data or
load data automatically. However, ... |
Manually trigger loading of this dataset's data from disk or a
remote source into memory and return a new dataset. The original is
left unaltered.
Normally, it should not be necessary to call this method in user code,
because all xarray functions should either work on deferred data or
... |
Persist all Dask arrays in memory
def _persist_inplace(self, **kwargs):
""" Persist all Dask arrays in memory """
# access .data to coerce everything to numpy or dask arrays
lazy_data = {k: v._data for k, v in self.variables.items()
if isinstance(v._data, dask_array_type)}
... |
Trigger computation, keeping data as dask arrays
This operation can be used to trigger computation on underlying dask
arrays, similar to ``.compute()``. However this operation keeps the
data as dask arrays. This is particularly useful when using the
dask.distributed scheduler and you ... |
Shortcut around __init__ for internal use when we want to skip
costly validation
def _construct_direct(cls, variables, coord_names, dims, attrs=None,
indexes=None, encoding=None, file_obj=None):
"""Shortcut around __init__ for internal use when we want to skip
costly v... |
Fastpath constructor for internal use.
Returns an object, optionally with replaced attributes.
Explicitly passed arguments are *not* copied when placed on the new
dataset. It is up to the caller to ensure that they have the right type
and are not used elsewhere.
def _replace( # t... |
Replace variables with recalculated dimensions.
def _replace_with_new_dims( # type: ignore
self: T,
variables: 'OrderedDict[Any, Variable]' = None,
coord_names: set = None,
attrs: 'Optional[OrderedDict]' = __default,
indexes: 'Optional[OrderedDict[Any, pd.Index]]' = __default,
... |
Returns a copy of this dataset.
If `deep=True`, a deep copy is made of each of the component variables.
Otherwise, a shallow copy of each of the component variables is made, so
that the underlying memory region of the new dataset is the same as in
the original dataset.
Use `data... |
Return a mapping of all MultiIndex levels and their corresponding
coordinate name.
def _level_coords(self):
"""Return a mapping of all MultiIndex levels and their corresponding
coordinate name.
"""
level_coords = OrderedDict()
for name, index in self.indexes.items():
... |
Create a new Dataset with the listed variables from this dataset and
all the relevant coordinates. Skips all validation.
def _copy_listed(self: T, names) -> T:
"""Create a new Dataset with the listed variables from this dataset and
all the relevant coordinates. Skips all validation.
"""... |
Construct a DataArray by indexing this dataset
def _construct_dataarray(self, name) -> 'DataArray':
"""Construct a DataArray by indexing this dataset
"""
from .dataarray import DataArray
try:
variable = self._variables[name]
except KeyError:
_, name, var... |
List of places to look-up items for key-completion
def _item_sources(self):
"""List of places to look-up items for key-completion"""
return [self.data_vars, self.coords, {d: self[d] for d in self.dims},
LevelCoordinatesSource(self)] |
Helper function for equals and identical
def _all_compat(self, other, compat_str):
"""Helper function for equals and identical"""
# some stores (e.g., scipy) do not seem to preserve order, so don't
# require matching order for equality
def compat(x, y):
return getattr(x, co... |
Mapping of pandas.Index objects used for label based indexing
def indexes(self) -> 'Mapping[Any, pd.Index]':
"""Mapping of pandas.Index objects used for label based indexing
"""
if self._indexes is None:
self._indexes = default_indexes(self._variables, self._dims)
return Ind... |
Given names of one or more variables, set them as coordinates
Parameters
----------
names : str or list of str
Name(s) of variables in this dataset to convert into coordinates.
inplace : bool, optional
If True, modify this dataset inplace. Otherwise, create a new... |
Given names of coordinates, reset them to become variables
Parameters
----------
names : str or list of str, optional
Name(s) of non-index coordinates in this dataset to reset into
variables. By default, all non-index coordinates are reset.
drop : bool, optional
... |
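A compact, made-up example of the two operations described above, moving a variable between data variables and coordinates and back again:

```python
import xarray as xr

ds = xr.Dataset({"temp": ("x", [1.0, 2.0]), "station": ("x", ["a", "b"])})
as_coord = ds.set_coords("station")            # 'station' becomes a non-index coordinate
back_again = as_coord.reset_coords("station")  # and returns to data_vars
```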
Store dataset contents to a backends.*DataStore object.
def dump_to_store(self, store, **kwargs):
"""Store dataset contents to a backends.*DataStore object."""
from ..backends.api import dump_to_store
# TODO: rename and/or cleanup this method to make it more consistent
# with to_netcdf(... |
Write dataset contents to a netCDF file.
Parameters
----------
path : str, Path or file-like object, optional
Path to which to save this dataset. File-like objects are only
supported by the scipy engine. If no path is provided, this
function returns the resul... |
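A minimal round-trip sketch; the file name is hypothetical, and writing requires a netCDF backend (netCDF4 or scipy) to be installed.

```python
import numpy as np
import xarray as xr

ds = xr.Dataset({"temperature": ("x", np.arange(4.0))})
ds.to_netcdf("example.nc")            # write to disk
roundtrip = xr.open_dataset("example.nc")
```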
Write dataset contents to a zarr group.
.. note:: Experimental
The Zarr backend is new and experimental. Please report any
unexpected behavior via github issues.
Parameters
----------
store : MutableMapping or str, optional
Store or path ... |
Concise summary of a Dataset's variables and attributes.
Parameters
----------
buf : writable buffer, defaults to sys.stdout
See Also
--------
pandas.DataFrame.assign
netCDF's ncdump
def info(self, buf=None):
"""
Concise summary of a Dataset's varia... |
Block dimensions for this dataset's data or None if it's not a dask
array.
def chunks(self):
"""Block dimensions for this dataset's data or None if it's not a dask
array.
"""
chunks = {}
for v in self.variables.values():
if v.chunks is not None:
... |
Coerce all arrays in this dataset into dask arrays with the given
chunks.
Non-dask arrays in this dataset will be converted to dask arrays. Dask
arrays will be rechunked to the given chunk sizes.
If chunks are not provided for one or more dimensions, chunk
sizes along th... |
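A small sketch, assuming dask is installed: the numpy-backed variable is converted to a dask array with blocks of 25 along 'time'.

```python
import numpy as np
import xarray as xr

ds = xr.Dataset({"t": (("time", "x"), np.zeros((100, 10)))})
chunked = ds.chunk({"time": 25})
print(chunked.chunks)
```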
Here we make sure that
+ indexers have valid keys
+ indexers are of a valid data type
+ string indexers are cast to the appropriate date type if the
associated index is a DatetimeIndex or CFTimeIndex
def _validate_indexers(
self, indexers: Mapping,
) -> List[Tuple[Any, Union[slic... |
Extract coordinates from indexers.
Returns an OrderedDict mapping from coordinate name to the
coordinate variable.
Only coordinates with a name different from any of self.variables will
be attached.
def _get_indexers_coords_and_indexes(self, indexers):
""" Extract coordinates f... |
Returns a new dataset with each array indexed along the specified
dimension(s).
This method selects values from each array using its `__getitem__`
method, except this method does not require knowing the order of
each array's dimensions.
Parameters
----------
ind... |
Returns a new dataset with each array indexed by tick labels
along the specified dimension(s).
In contrast to `Dataset.isel`, indexers for this method should use
labels instead of integers.
Under the hood, this method is powered by using pandas's powerful Index
objects. This ma... |
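A quick, made-up comparison of positional and label-based selection as described in the two entries above:

```python
import numpy as np
import xarray as xr

ds = xr.Dataset({"temp": (("time", "x"), np.random.rand(3, 4))},
                coords={"time": [10, 20, 30]})

by_position = ds.isel(time=0)                  # integer position along 'time'
by_label = ds.sel(time=20)                     # coordinate label along 'time'
nearest = ds.sel(time=21, method="nearest")    # inexact label lookup
```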
Returns a new dataset with each array indexed pointwise along the
specified dimension(s).
This method selects pointwise values from each array and is akin to
the NumPy indexing behavior of `arr[[0, 1], [0, 1]]`, except this
method does not require knowing the order of each array's dimen... |
Returns a new dataset with each array indexed pointwise by tick
labels along the specified dimension(s).
In contrast to `Dataset.isel_points`, indexers for this method should
use labels instead of integers.
In contrast to `Dataset.sel`, this method selects points along the
diag... |
Conform this object onto the indexes of another object, filling
in missing values with NaN.
Parameters
----------
other : Dataset or DataArray
Object with an 'indexes' attribute giving a mapping from dimension
names to pandas.Index objects, which provides coordin... |
Conform this object onto a new set of indexes, filling in
missing values with NaN.
Parameters
----------
indexers : dict, optional
Dictionary with keys given by dimension names and values given by
arrays of coordinate tick labels. Any mismatched coordinate
... |
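For example (values invented), conforming onto a new set of labels fills any missing positions with NaN unless a `fill_value` is supplied:

```python
import xarray as xr

ds = xr.Dataset({"v": ("x", [1.0, 2.0, 3.0])}, coords={"x": [0, 1, 2]})
conformed = ds.reindex(x=[0, 2, 4])   # x=4 is new, so v becomes NaN there
```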
Multidimensional interpolation of Dataset.
Parameters
----------
coords : dict, optional
Mapping from dimension names to the new coordinates.
New coordinate can be a scalar, array-like or DataArray.
If DataArrays are passed as new coordinates, their dimensions ... |
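A minimal sketch with invented data, assuming scipy is available as the interpolation backend:

```python
import numpy as np
import xarray as xr

x = np.linspace(0.0, np.pi, 5)
ds = xr.Dataset({"v": ("x", np.sin(x))}, coords={"x": x})
fine = ds.interp(x=np.linspace(0.0, np.pi, 50))   # linear interpolation by default
```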
Interpolate this object onto the coordinates of another object,
filling the out of range values with NaN.
Parameters
----------
other : Dataset or DataArray
Object with an 'indexes' attribute giving a mapping from dimension
names to a 1d array-like, which provid... |
Returns a new object with renamed variables and dimensions.
Parameters
----------
name_dict : dict-like, optional
Dictionary whose keys are current variable or dimension names and
whose values are the desired names.
inplace : bool, optional
If True, r... |
Returns a new object with swapped dimensions.
Parameters
----------
dims_dict : dict-like
Dictionary whose keys are current dimension names and whose values
are new names. Each value must already be a variable in the
dataset.
inplace : bool, optional
... |
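The two renaming-style operations above differ in what they touch; a toy example with invented names:

```python
import xarray as xr

ds = xr.Dataset({"v": ("x", [1, 2, 3])},
                coords={"x": [0, 1, 2], "lon": ("x", [10.0, 20.0, 30.0])})

renamed = ds.rename({"v": "velocity"})   # only changes names
swapped = ds.swap_dims({"x": "lon"})     # 'lon' (already a variable) becomes the dimension
```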
Return a new object with an additional axis (or axes) inserted at
the corresponding position in the array shape.
If dim is already a scalar coordinate, it will be promoted to a 1D
coordinate consisting of a single value.
Parameters
----------
dim : str, sequence of str,... |
Set Dataset (multi-)indexes using one or more existing coordinates
or variables.
Parameters
----------
indexes : {dim: index, ...}
Mapping from names matching dimensions and values given
by (lists of) the names of existing coordinates or variables to set
... |
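A small sketch of building a MultiIndex from existing coordinates (the coordinate names here are illustrative):

```python
import xarray as xr

ds = xr.Dataset({"v": ("x", [1, 2, 3, 4])},
                coords={"band": ("x", [1, 1, 2, 2]),
                        "wn": ("x", [0.1, 0.2, 0.1, 0.2])})

indexed = ds.set_index(x=["band", "wn"])   # 'x' now carries a MultiIndex
restored = indexed.reset_index("x")        # back to plain coordinates
```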
Reset the specified index(es) or multi-index level(s).
Parameters
----------
dims_or_levels : str or list
Name(s) of the dimension(s) and/or multi-index level(s) that will
be reset.
drop : bool, optional
If True, remove the specified indexes and/or mu... |
Rearrange index levels using input order.
Parameters
----------
dim_order : optional
Mapping from names matching dimensions and values given
by lists representing new level orders. Every given dimension
must have a multi-index.
inplace : bool, optiona... |
Unstack existing dimensions corresponding to MultiIndexes into
multiple new dimensions.
New dimensions will be added at the end.
Parameters
----------
dim : str or sequence of str, optional
Dimension(s) over which to unstack. By default unstacks all
Mult... |
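Unstacking is the inverse of stacking dimensions into a MultiIndex; a made-up round trip:

```python
import xarray as xr

ds = xr.Dataset({"v": (("x", "y"), [[1, 2], [3, 4]])},
                coords={"x": ["a", "b"], "y": [0, 1]})

stacked = ds.stack(z=("x", "y"))   # 'z' holds a MultiIndex of (x, y) pairs
unstacked = stacked.unstack("z")   # recovers the original 'x' and 'y' dims
```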
Update this dataset's variables with those from another dataset.
Parameters
----------
other : Dataset or castable to Dataset
Dataset or variables with which to update this dataset.
inplace : bool, optional
If True, merge the other dataset into this dataset in-pl... |
Merge the arrays of two datasets into a single dataset.
This method generally does not allow overriding data, with the exception
of attributes, which are ignored on the second dataset. Variables with
the same name are checked for conflicts via the equals or identical
methods.
Pa... |
Drop variables or index labels from this dataset.
Parameters
----------
labels : scalar or list of scalars
Name(s) of variables or index labels to drop.
dim : None or str, optional
Dimension along which to drop index labels. By default (if
``dim is No... |
Drop dimensions and associated variables from this dataset.
Parameters
----------
drop_dims : str or list
Dimension or dimensions to drop.
Returns
-------
obj : Dataset
The dataset without the given dimensions (or any variables
contai... |
Return a new Dataset object with all array dimensions transposed.
Although the order of dimensions on each array will change, the dataset
dimensions themselves will remain in fixed (sorted) order.
Parameters
----------
*dims : str, optional
By default, reverse the d... |
Returns a new dataset with dropped labels for missing values along
the provided dimension.
Parameters
----------
dim : str
Dimension along which to drop missing values. Dropping along
multiple dimensions simultaneously is not yet supported.
how : {'any', ... |
Fill missing values in this object.
This operation follows the normal broadcasting and alignment rules that
xarray uses for binary arithmetic, except the result is aligned to this
object (``join='left'``) instead of aligned to the intersection of
index coordinates (``join='inner'``).
... |
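For instance (values invented), filling with a scalar replaces every NaN in the data variables:

```python
import numpy as np
import xarray as xr

ds = xr.Dataset({"v": ("x", [1.0, np.nan, 3.0])})
filled = ds.fillna(0.0)
```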
Interpolate values according to different methods.
Parameters
----------
dim : str
Specifies the dimension along which to interpolate.
method : {'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic',
'polynomial', 'barycentric', 'krog', 'pchip',
... |
Fill NaN values by propagating values forward
*Requires bottleneck.*
Parameters
----------
dim : str
Specifies the dimension along which to propagate values when
filling.
limit : int, default None
The maximum number of consecutive NaN values ... |
Fill NaN values by propagating values backward
*Requires bottleneck.*
Parameters
----------
dim : str
Specifies the dimension along which to propagate values when
filling.
limit : int, default None
The maximum number of consecutive NaN values... |
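The forward and backward fills above both require the bottleneck package; a toy example with invented values:

```python
import numpy as np
import xarray as xr

ds = xr.Dataset({"v": ("x", [np.nan, 2.0, np.nan, 4.0])})
forward = ds.ffill(dim="x")    # [nan, 2., 2., 4.]
backward = ds.bfill(dim="x")   # [2., 2., 4., 4.]
```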
Combine two Datasets, defaulting to data_vars of self.
The new coordinates follow the normal broadcasting and alignment rules
of ``join='outer'``. Vacant cells in the expanded coordinates are
filled with np.nan.
Parameters
----------
other : DataArray
Used to ... |
Reduce this dataset by applying `func` along some dimension(s).
Parameters
----------
func : function
Function which can be called in the form
`f(x, axis=axis, **kwargs)` to return the result of reducing an
np.ndarray over an integer valued axis.
dim ... |
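Any function of the form `f(values, axis=...)` can be used; a minimal sketch with a plain NumPy reduction over invented data:

```python
import numpy as np
import xarray as xr

ds = xr.Dataset({"v": (("x", "y"), np.arange(6.0).reshape(2, 3))})
row_means = ds.reduce(np.mean, dim="y")   # 'y' is collapsed on every variable
```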
Apply a function over the data variables in this dataset.
Parameters
----------
func : function
Function which can be called in the form `func(x, *args, **kwargs)`
to transform each DataArray `x` in this dataset into another
DataArray.
keep_attrs : bo... |
Assign new data variables to a Dataset, returning a new object
with all the original variables in addition to the new ones.
Parameters
----------
variables : mapping, value pairs
Mapping from variables names to the new values. If the new values
are callable, they... |
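New variables can be given as data or as callables evaluated against the dataset; for example (names invented):

```python
import xarray as xr

ds = xr.Dataset({"a": ("x", [1, 2, 3])})
ds2 = ds.assign(b=lambda d: d["a"] * 2)   # original 'a' is kept, 'b' is added
```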
Convert this dataset into an xarray.DataArray
The data variables of this dataset will be broadcast against each other
and stacked along the first axis of the new array. All coordinates of
this dataset will remain coordinates.
Parameters
----------
dim : str, optional
... |
Convert a pandas.DataFrame into an xarray.Dataset
Each column will be converted into an independent variable in the
Dataset. If the dataframe's index is a MultiIndex, it will be expanded
into a tensor product of one-dimensional indices (filling in missing
values with NaN). This method w... |
Convert this dataset into a dask.dataframe.DataFrame.
The dimensions, coordinates and data variables in this dataset form
the columns of the DataFrame.
Parameters
----------
dim_order : list, optional
Hierarchical dimension order for the resulting dataframe. All
... |
Convert this dataset to a dictionary following xarray naming
conventions.
Converts all variables and attributes to native Python objects
Useful for coverting to json. To avoid datetime incompatibility
use decode_times=False kwarg in xarrray.open_dataset.
Parameters
----... |
Convert a dictionary into an xarray.Dataset.
Input dict can take several forms::
d = {'t': {'dims': ('t'), 'data': t},
'a': {'dims': ('t'), 'data': x},
'b': {'dims': ('t'), 'data': y}}
d = {'coords': {'t': {'dims': 't', 'data': t,
... |
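A tiny sketch of the dict form accepted here, matching the shapes shown in the docstring (the keys and values are made up):

```python
import xarray as xr

d = {
    "coords": {"t": {"dims": "t", "data": [0, 1, 2]}},
    "data_vars": {"a": {"dims": "t", "data": [10, 20, 30]}},
}
ds = xr.Dataset.from_dict(d)
```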
Calculate the n-th order discrete difference along given axis.
Parameters
----------
dim : str, optional
Dimension over which to calculate the finite difference.
n : int, optional
The number of times values are differenced.
label : str, optional
... |
Shift this dataset by an offset along one or more dimensions.
Only data variables are moved; coordinates stay in place. This is
consistent with the behavior of ``shift`` in pandas.
Parameters
----------
shifts : Mapping with the form of {dim: offset}
Integer offset ... |
Roll this dataset by an offset along one or more dimensions.
Unlike shift, roll may rotate all variables, including coordinates
if specified. The direction of rotation is consistent with
:py:func:`numpy.roll`.
Parameters
----------
shifts : dict, optional
A... |
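The difference between the two offset operations above, with invented values: `shift` moves data and leaves coordinates alone, while `roll` rotates values around the end.

```python
import xarray as xr

ds = xr.Dataset({"v": ("x", [1.0, 2.0, 3.0, 4.0])}, coords={"x": [10, 20, 30, 40]})
shifted = ds.shift(x=1)                   # v -> [nan, 1., 2., 3.]; 'x' unchanged
rolled = ds.roll(x=1, roll_coords=True)   # v -> [4., 1., 2., 3.]; 'x' rotates too
```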
Sort object by labels or values (along an axis).
Sorts the dataset, either along specified dimensions,
or according to values of 1-D dataarrays that share dimension
with calling object.
If the input variables are dataarrays, then the dataarrays are aligned
(via left-join) to th... |
Compute the qth quantile of the data along the specified dimension.
Returns the qth quantiles(s) of the array elements for each variable
in the Dataset.
Parameters
----------
q : float in range of [0,1] (or sequence of floats)
Quantile to compute, which must be betw... |
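A short example with invented data; passing a sequence of quantiles adds a new 'quantile' dimension to the result.

```python
import numpy as np
import xarray as xr

ds = xr.Dataset({"v": (("x", "y"), np.arange(12.0).reshape(3, 4))})
median = ds.quantile(0.5, dim="y")
spread = ds.quantile([0.25, 0.75], dim="y")
```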
Ranks the data.
Equal values are assigned a rank that is the average of the ranks that
would have been otherwise assigned to all of the values within
that set.
Ranks begin at 1, not 0. If pct is True, computes percentage ranks.
NaNs in the input array are returned as NaNs.
... |
Differentiate with the second order accurate central
differences.
.. note::
This feature is limited to simple cartesian geometry, i.e. coord
must be one dimensional.
Parameters
----------
coord: str
The coordinate to be used to compute the gr... |
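For instance (data invented), differentiating a quadratic along its one-dimensional coordinate approximates the analytic derivative:

```python
import numpy as np
import xarray as xr

x = np.linspace(0.0, 1.0, 11)
ds = xr.Dataset({"v": ("x", x ** 2)}, coords={"x": x})
dv_dx = ds.differentiate("x")   # roughly 2*x, via second-order central differences
```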
Integrate the array with the trapezoidal rule.
.. note::
This feature is limited to simple cartesian geometry, i.e. coord
must be one dimensional.
Parameters
----------
dim: str, or a sequence of str
Coordinate(s) used for the integration.
da... |
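And the companion trapezoidal-rule integral, using the same invented data:

```python
import numpy as np
import xarray as xr

x = np.linspace(0.0, 1.0, 101)
ds = xr.Dataset({"v": ("x", x ** 2)}, coords={"x": x})
area = ds.integrate("x")   # close to 1/3
```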
Returns a ``Dataset`` with variables that match specific conditions.
Can pass in ``key=value`` or ``key=callable``. A Dataset is returned
containing only the variables for which all the filter tests pass.
These tests are either ``key=value`` for which the attribute ``key``
has the exac... |
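A made-up example of attribute-based filtering; values may also be callables applied to the attribute.

```python
import xarray as xr

ds = xr.Dataset({
    "temp": ("x", [1.0, 2.0], {"standard_name": "air_temperature"}),
    "precip": ("x", [0.0, 1.0], {"standard_name": "precipitation"}),
})
only_temp = ds.filter_by_attrs(standard_name="air_temperature")
```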
Lazily apply an element-wise function to an array.
Parameters
----------
array : any valid value of Variable._data
func : callable
Function to apply to indexed slices of an array. For use with dask,
this should be a pickle-able object.
dtype : coercible to np.dtype
Dtype for... |
A convenience function which pops a key k from source to dest.
None values are not passed on. If k already exists in dest, an
error is raised.
def pop_to(source, dest, key, name=None):
"""
A convenience function which pops a key k from source to dest.
None values are not passed on. If k already ex... |
Mask all matching values in a NumPy array.
def _apply_mask(
data: np.ndarray,
encoded_fill_values: list,
decoded_fill_value: Any,
dtype: Any,
) -> np.ndarray:
"""Mask all matching values in a NumPy array."""
data = np.asarray(data, dtype=dtype)
condition = False
for fv in encoded_fill... |
Return a float dtype that can losslessly represent `dtype` values.
def _choose_float_dtype(dtype, has_offset):
"""Return a float dtype that can losslessly represent `dtype` values."""
# Keep float32 as-is. Upcast half-precision to single-precision,
# because float16 is "intended for storage but not comput... |
Wrapper for datasets.
def _apply_over_vars_with_dim(func, self, dim=None, **kwargs):
'''Wrapper for datasets.'''
ds = type(self)(coords=self.coords, attrs=self.attrs)
for name, var in self.data_vars.items():
if dim in var.dims:
ds[name] = func(var, dim=dim, **kwargs)
else:
... |
Get the index to use for x values in interpolation.
If use_coordinate is True, the coordinate that shares the name of the
dimension along which interpolation is being performed will be used as the
x values.
If use_coordinate is False, the x values are set as an equally spaced
sequence.
def get_clean_... |
Interpolate values according to different methods.
def interp_na(self, dim=None, use_coordinate=True, method='linear', limit=None,
**kwargs):
'''Interpolate values according to different methods.'''
if dim is None:
raise NotImplementedError('dim is a required argument')
if limit is ... |
Helper function to apply interpolation along one dimension.
def func_interpolate_na(interpolator, x, y, **kwargs):
'''Helper function to apply interpolation along one dimension.'''
# it would be nice if this wasn't necessary, works around:
# "ValueError: assignment destination is read-only" in assignment below
... |
Inverse of ffill.
def _bfill(arr, n=None, axis=-1):
'''Inverse of ffill.'''
import bottleneck as bn
arr = np.flip(arr, axis=axis)
# fill
arr = bn.push(arr, axis=axis, n=n)
# reverse back to original
return np.flip(arr, axis=axis) |