forward fill missing values def ffill(arr, dim=None, limit=None): '''forward fill missing values''' import bottleneck as bn axis = arr.get_axis_num(dim) # work around for bottleneck 178 _limit = limit if limit is not None else arr.shape[axis] return apply_ufunc(bn.push, arr, ...
backfill missing values def bfill(arr, dim=None, limit=None): '''backfill missing values''' axis = arr.get_axis_num(dim) # work around for bottleneck 178 _limit = limit if limit is not None else arr.shape[axis] return apply_ufunc(_bfill, arr, dask='parallelized', ...
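These two helpers back xarray's public DataArray.ffill and DataArray.bfill methods (both rely on the bottleneck package). A minimal usage sketch with made-up values:

    import numpy as np
    import xarray as xr

    da = xr.DataArray([0.0, np.nan, np.nan, 3.0], dims='time')
    da.ffill(dim='time', limit=1)  # [0., 0., nan, 3.] -- fill at most one NaN forward
    da.bfill(dim='time')           # [0., 3., 3., 3.]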
helper function to select the appropriate interpolator class returns interpolator class and keyword arguments for the class def _get_interpolator(method, vectorizeable_only=False, **kwargs): '''helper function to select the appropriate interpolator class returns interpolator class and keyword arguments f...
helper function to select the appropriate interpolator class returns interpolator class and keyword arguments for the class def _get_interpolator_nd(method, **kwargs): '''helper function to select the appropriate interpolator class returns interpolator class and keyword arguments for the class ''' ...
helper function to determine values that can be filled when limit is not None def _get_valid_fill_mask(arr, dim, limit): '''helper function to determine values that can be filled when limit is not None''' kw = {dim: limit + 1} # we explicitly use construct method to avoid copy. new_dim = utils....
Speed up for linear and nearest neighbor method. Only consider a subspace that is needed for the interpolation def _localize(var, indexes_coords): """ Speed up for linear and nearest neighbor method. Only consider a subspace that is needed for the interpolation """ indexes = {} for dim, [x, new...
Make x and new_x float. This is particularly useful for datetime dtype. x, new_x: tuple of np.ndarray def _floatize_x(x, new_x): """ Make x and new_x float. This is particularly useful for datetime dtype. x, new_x: tuple of np.ndarray """ x = list(x) new_x = list(new_x) for i in range...
Make an interpolation of Variable Parameters ---------- var: Variable index_coords: Mapping from dimension name to a pair of original and new coordinates. Original coordinates should be sorted in strictly ascending order. Note that all the coordinates should be Variable objects....
multi-dimensional interpolation for array-like. Interpolated axes should be located in the last position. Parameters ---------- var: np.ndarray or dask.array.Array Array to be interpolated. The final dimension is interpolated. x: a list of 1d array. Original coordinates. Should not ...
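These interpolation internals are exercised through the public DataArray.interp method; a minimal sketch with made-up coordinates (requires scipy):

    import numpy as np
    import xarray as xr

    da = xr.DataArray(np.sin(np.linspace(0, 2 * np.pi, 10)), dims='x',
                      coords={'x': np.linspace(0, 1, 10)})
    da.interp(x=[0.05, 0.15, 0.25])  # linear interpolation onto the new x values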
Put coord, value in template and truncate at maxchar def _nicetitle(coord, value, maxchar, template): """ Put coord, value in template and truncate at maxchar """ prettyvalue = format_item(value, quote_strings=False) title = template.format(coord=coord, value=prettyvalue) if len(title) > maxch...
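A standalone sketch of the truncation behavior described above (the function name and template here are illustrative, not the actual private helper):

    def nicetitle(coord, value, maxchar, template="{coord} = {value}"):
        """Fill the template with coord/value and truncate the result to maxchar."""
        title = template.format(coord=coord, value=value)
        if len(title) > maxchar:
            title = title[:maxchar - 3] + "..."
        return title

    nicetitle("time", "2000-01-01", maxchar=12)  # 'time = 20...'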
Convenience method to call xarray.plot.FacetGrid from 2d plotting methods. kwargs are the arguments to the 2d plotting method. def _easy_facetgrid(data, plotfunc, kind, x=None, y=None, row=None, col=None, col_wrap=None, sharex=True, sharey=True, aspect=None, size=None, subplot_kw...
Apply a plotting function to a 2d facet's subset of the data. This is more convenient and less general than ``FacetGrid.map`` Parameters ---------- func : callable A plotting function with the same signature as a 2d xarray plotting method such as `xarray.plot.im...
Finalize the annotations and layout. def _finalize_grid(self, *axlabels): """Finalize the annotations and layout.""" if not self._finalized: self.set_axis_labels(*axlabels) self.set_titles() self.fig.tight_layout() for ax, namedict in zip(self.axes.flat,...
Draw a colorbar def add_colorbar(self, **kwargs): """Draw a colorbar """ kwargs = kwargs.copy() if self._cmap_extend is not None: kwargs.setdefault('extend', self._cmap_extend) if 'label' not in kwargs: kwargs.setdefault('label', label_from_attrs(self.dat...
Set axis labels on the left column and bottom row of the grid. def set_axis_labels(self, x_var=None, y_var=None): """Set axis labels on the left column and bottom row of the grid.""" if x_var is not None: if x_var in self.data.coords: self._x_var = x_var self...
Label the x axis on the bottom row of the grid. def set_xlabels(self, label=None, **kwargs): """Label the x axis on the bottom row of the grid.""" if label is None: label = label_from_attrs(self.data[self._x_var]) for ax in self._bottom_axes: ax.set_xlabel(label, **kwarg...
Label the y axis on the left column of the grid. def set_ylabels(self, label=None, **kwargs): """Label the y axis on the left column of the grid.""" if label is None: label = label_from_attrs(self.data[self._y_var]) for ax in self._left_axes: ax.set_ylabel(label, **kwarg...
Draw titles either above each facet or on the grid margins. Parameters ---------- template : string Template for plot titles containing {coord} and {value} maxchar : int Truncate titles at maxchar kwargs : keyword args additional arguments to ...
Set and control tick behavior Parameters ---------- max_xticks, max_yticks : int, optional Maximum number of labeled ticks to plot on x, y axes fontsize : string or int Font size as used by matplotlib text Returns ------- self : FacetGrid...
Apply a plotting function to each facet's subset of the data. Parameters ---------- func : callable A plotting function that takes data and keyword arguments. It must plot to the currently active matplotlib Axes and take a `color` keyword argument. If facetin...
Obtain the bins and their respective labels for resampling operations. Parameters ---------- index : CFTimeIndex Index object to be resampled (e.g., CFTimeIndex named 'time'). freq : xarray.coding.cftime_offsets.BaseCFTimeOffset The offset object representing target conversion a.k.a. re...
This is required for determining the bin edges when resampling with daily frequencies greater than one day, month end, and year end frequencies. Consider the following example. Let's say you want to downsample the time series with the following coordinates to month end frequency: CFTimeIndex([2000-01-...
Get the correct starting and ending datetimes for the resampled CFTimeIndex range. Parameters ---------- first : cftime.datetime Uncorrected starting datetime object for resampled CFTimeIndex range. Usually the min of the original CFTimeIndex. last : cftime.datetime Uncorrec...
First and last offsets should be calculated from the start day to fix an error caused by resampling across multiple days when a one day period is not a multiple of the frequency. See https://github.com/pandas-dev/pandas/issues/8683 Parameters ---------- first : cftime.datetime A datetime...
Exact computation of b - a. Assumes a = a_0 + a_m and b = b_0 + b_m, where a_0 and b_0 represent the input dates rounded down to the nearest second, and a_m and b_m represent the remaining microseconds associated with date a and date b. We can then express the value of b - a as: ...
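A sketch of the same split-and-recombine idea using standard-library datetimes (the real helper operates on cftime objects; names here are illustrative):

    import datetime

    def exact_difference(a, b):
        """Compute b - a as (b_0 - a_0) + (b_m - a_m): difference the
        whole-second parts and the leftover microseconds separately."""
        a0, b0 = a.replace(microsecond=0), b.replace(microsecond=0)
        return (b0 - a0) + datetime.timedelta(microseconds=b.microsecond - a.microsecond)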
Meant to reproduce the results of the following grouper = pandas.Grouper(...) first_items = pd.Series(np.arange(len(index)), index).groupby(grouper).first() with index being a CFTimeIndex instead of a DatetimeIndex. def first_items(self, index): """Mean...
align(*objects, join='inner', copy=True, indexes=None, exclude=frozenset()) Given any number of Dataset and/or DataArray objects, returns new objects with aligned indexes and dimension sizes. Array from the aligned objects are suitable as input to mathematical operators, because along eac...
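A quick usage sketch of the public entry point (coordinate values are made up):

    import xarray as xr

    x = xr.DataArray([1, 2, 3], dims='t', coords={'t': [0, 1, 2]})
    y = xr.DataArray([10, 20, 30], dims='t', coords={'t': [1, 2, 3]})
    xa, ya = xr.align(x, y, join='inner')  # both results now share t = [1, 2]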
Align objects for merging, recursing into dictionary values. This function is not public API. def deep_align(objects, join='inner', copy=True, indexes=None, exclude=frozenset(), raise_on_invalid=True): """Align objects for merging, recursing into dictionary values. This function is not pub...
Extract indexers to align target with other. Not public API. Parameters ---------- target : Dataset or DataArray Object to be aligned. other : Dataset or DataArray Object to be aligned with. Returns ------- Dict[Any, pandas.Index] providing indexes for reindex keyword ...
Conform a dictionary of aligned variables onto a new set of variables, filling in missing values with NaN. Not public API. Parameters ---------- variables : dict-like Dictionary of xarray.Variable objects. sizes : dict-like Dictionary from dimension names to integer sizes. ...
Explicitly broadcast any number of DataArray or Dataset objects against one another. xarray objects automatically broadcast against each other in arithmetic operations, so this function should not be necessary for normal use. If no change is needed, the input data is returned to the output without ...
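Minimal usage sketch of xr.broadcast (values made up):

    import xarray as xr

    a = xr.DataArray([1, 2, 3], dims='x')
    b = xr.DataArray([10, 20], dims='y')
    a2, b2 = xr.broadcast(a, b)  # both results have dims ('x', 'y') and shape (3, 2)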
Make sure the dimension coordinate of obj is consistent with coords. obj: DataArray or Dataset coords: Dict-like of variables def assert_coordinate_consistent(obj, coords): """ Make sure the dimension coordinate of obj is consistent with coords. obj: DataArray or Dataset coords: Dict-li...
Remap **indexers from obj.coords. If indexer is an instance of DataArray and it has a coordinate, then this coordinate will be attached to pos_indexers. Returns ------- pos_indexers: Same type of indexers. np.ndarray or Variable or DataArray new_indexes: mapping of new dimensional-coordina...
Convert all index coordinates into a :py:class:`pandas.Index`. Parameters ---------- ordered_dims : sequence, optional Possibly reordered version of this object's dimensions indicating the order in which dimensions should appear on the result. Returns --...
For use with binary arithmetic. def _merge_raw(self, other): """For use with binary arithmetic.""" if other is None: variables = OrderedDict(self.variables) else: # don't align because we already called xarray.align variables = expand_and_merge_variables( ...
For use with in-place binary arithmetic. def _merge_inplace(self, other): """For use with in-place binary arithmetic.""" if other is None: yield else: # don't include indexes in priority_vars, because we didn't align # first priority_vars = Ordere...
Merge two sets of coordinates to create a new Dataset The method implements the logic used for joining coordinates in the result of a binary operation performed on xarray objects: - If two index coordinates conflict (are not equal), an exception is raised. You must align your data be...
Provide method for the key-autocompletions in IPython. def _ipython_key_completions_(self): """Provide method for the key-autocompletions in IPython. """ return [key for key in self._data._ipython_key_completions_() if key not in self._data.data_vars]
wrapper to apply bottleneck moving window funcs on dask arrays def dask_rolling_wrapper(moving_func, a, window, min_count=None, axis=-1): '''wrapper to apply bottleneck moving window funcs on dask arrays''' dtype, fill_value = dtypes.maybe_promote(a.dtype) a = a.astype(dtype) # inputs for overlap i...
Dask's equivalent of np.utils.rolling_window def rolling_window(a, axis, window, center, fill_value): """ Dask's equivalent of np.utils.rolling_window """ orig_shape = a.shape if axis < 0: axis = a.ndim + axis depth = {d: 0 for d in range(a.ndim)} depth[axis] = int(window / 2) # For e...
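This dask helper sits behind the public rolling-window API; a minimal sketch of the user-facing call (values made up):

    import numpy as np
    import xarray as xr

    da = xr.DataArray(np.arange(6.0), dims='time')
    da.rolling(time=3, center=True).mean()  # centered 3-point moving average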
Given encoding chunks (possibly None) and variable chunks (possibly None) def _determine_zarr_chunks(enc_chunks, var_chunks, ndim): """ Given encoding chunks (possibly None) and variable chunks (possibly None) """ # zarr chunk spec: # chunks : int or tuple of ints, optional # Chunk shape. If...
Converts a Variable into a Variable which follows some of the CF conventions: - NaNs are masked using _FillValue (or the deprecated missing_value) - Rescaling via: scale_factor and add_offset - datetimes are converted to the CF 'units since time' format - dtype encodings are enfor...
Load and decode a dataset from a Zarr store. .. note:: Experimental The Zarr backend is new and experimental. Please report any unexpected behavior via github issues. The `store` object should be a valid store for a Zarr group. `store` variables must contain dimension metadata ...
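A hedged usage sketch (the store path and variable name are hypothetical):

    import xarray as xr

    ds = xr.open_zarr('path/to/store.zarr')         # lazily opens the Zarr group
    subset = ds['temperature'].sel(time='2000-01')  # hypothetical variable/selection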
Extract dimension sizes from a dictionary of variables. Raises ValueError if any dimensions have different sizes. def broadcast_dimension_size( variables: List[Variable], ) -> 'OrderedDict[Any, int]': """Extract dimension sizes from a dictionary of variables. Raises ValueError if any dimensions have ...
Return the unique variable from a list of variables or raise MergeError. Parameters ---------- name : hashable Name for this variable. variables : list of xarray.Variable List of Variable objects, all of which go by the same name in different inputs. compat : {'identical', '...
Merge dicts of variables, while resolving conflicts appropriately. Parameters ---------- lists_of_variables_dicts : list of mappings with Variable values List of mappings for which each value is a xarray.Variable object. priority_vars : mapping with Variable or None values, optional If ...
Given a list of dicts with xarray object values, expand the values. Parameters ---------- list_of_variable_dicts : list of dict or Dataset objects Each value for the mappings must be of the following types: - an xarray.Variable - a tuple `(dims, data[, attrs[, encoding]])` that can ...
Given a list of dicts with xarray object values, identify coordinates. Parameters ---------- list_of_variable_dicts : list of dict or Dataset objects Of the same form as the arguments to expand_variable_dicts. Returns ------- coord_names : set of variable names noncoord_names : set...
Convert pandas values found in a list of labeled objects. Parameters ---------- objects : list of Dataset or mappings The mappings may contain any sort of objects coercible to xarray.Variables as keys, including pandas objects. Returns ------- List of Dataset or OrderedDict obj...
Merge coordinate variables without worrying about alignment. This function is used for merging variables in coordinates.py. def merge_coords_for_inplace_math(objs, priority_vars=None): """Merge coordinate variables without worrying about alignment. This function is used for merging variables in coordinat...
Extract the priority variable from a list of mappings. We need this method because in some cases the priority argument itself might have conflicting values (e.g., if it is a dict with two DataArray values with conflicting coordinate values). Parameters ---------- objects : list of dictionaries...
Merge coordinate variables without worrying about alignment. This function is used for merging variables in computation.py. def expand_and_merge_variables(objs, priority_arg=None): """Merge coordinate variables without worrying about alignment. This function is used for merging variables in computation.p...
Merge coordinate variables. See merge_core below for argument descriptions. This works similarly to merge_core, except that we don't worry about whether variables are coordinates or not. def merge_coords(objs, compat='minimal', join='outer', priority_arg=None, indexes=None): """...
Used in Dataset.__init__. def merge_data_and_coords(data, coords, compat='broadcast_equals', join='outer'): """Used in Dataset.__init__.""" objs = [data, coords] explicit_coords = coords.keys() indexes = dict(extract_indexes(coords)) return merge_core(objs, compat, join, e...
Yields the name & index of valid indexes from a mapping of coords def extract_indexes(coords): """Yields the name & index of valid indexes from a mapping of coords""" for name, variable in coords.items(): variable = as_variable(variable, name=name) if variable.dims == (name,): yield...
Validate explicit coordinate names/dims. Raise a MergeError if an explicit coord shares a name with a dimension but is comprised of arbitrary dimensions. def assert_valid_explicit_coords(variables, dims, explicit_coords): """Validate explicit coordinate names/dims. Raise a MergeError if an explicit c...
Core logic for merging labeled objects. This is not public API. Parameters ---------- objs : list of mappings All values must be convertable to labeled arrays. compat : {'identical', 'equals', 'broadcast_equals', 'no_conflicts'}, optional Compatibility checks to use when merging va...
Merge any number of xarray objects into a single Dataset as variables. Parameters ---------- objects : Iterable[Union[xarray.Dataset, xarray.DataArray, dict]] Merge together all variables from these objects. If any of them are DataArray objects, they must have a name. compat : {'identic...
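Minimal usage sketch of xr.merge (names and values made up):

    import xarray as xr

    a = xr.DataArray([1, 2], dims='x', name='a')
    b = xr.DataArray([3, 4], dims='y', name='b')
    merged = xr.merge([a, b])  # Dataset with data variables 'a' and 'b'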
Guts of the Dataset.merge method. def dataset_merge_method(dataset, other, overwrite_vars, compat, join): """Guts of the Dataset.merge method.""" # we are locked into supporting overwrite_vars for the Dataset.merge # method due to backwards compatibility # TODO: consider deprecating it? if isins...
Guts of the Dataset.update method. This drops duplicated coordinates from `other` if `other` is not an `xarray.Dataset`, e.g., if it's a dict with DataArray values (GH2068, GH2180). def dataset_update_method(dataset, other): """Guts of the Dataset.update method. This drops duplicated coordina...
replace nan in a by val, and return the replaced array and the nan position def _replace_nan(a, val): """ replace nan in a by val, and return the replaced array and the nan position """ mask = isnull(a) return where_method(val, mask, a), mask
xarray version of pandas.core.nanops._maybe_null_out def _maybe_null_out(result, axis, mask, min_count=1): """ xarray version of pandas.core.nanops._maybe_null_out """ if hasattr(axis, '__len__'): # if tuple or list raise ValueError('min_count is not available for reduction ' ...
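The min_count logic surfaces in the public reductions; a sketch of the observable behavior (values made up):

    import numpy as np
    import xarray as xr

    xr.DataArray([np.nan, np.nan, 1.0], dims='x').sum(min_count=1)  # 1.0
    xr.DataArray([np.nan, np.nan], dims='x').sum(min_count=1)       # nan (too few valid values)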
In-house nanargmin, nanargmax for object arrays. Always return integer type def _nan_argminmax_object(func, fill_value, value, axis=None, **kwargs): """ In-house nanargmin, nanargmax for object arrays. Always return integer type """ valid_count = count(value, axis=axis) value = fillna(value, fi...
In-house nanmin and nanmax for object arrays def _nan_minmax_object(func, fill_value, value, axis=None, **kwargs): """ In-house nanmin and nanmax for object arrays """ valid_count = count(value, axis=axis) filled_value = fillna(value, fill_value) data = getattr(np, func)(filled_value, axis=axis, **kwargs...
In-house nanmean. The ddof argument will be used in the _nanvar method def _nanmean_ddof_object(ddof, value, axis=None, **kwargs): """ In-house nanmean. The ddof argument will be used in the _nanvar method """ from .duck_array_ops import (count, fillna, _dask_or_eager_func, where_method) ...
Disable automatic decoding on a netCDF4.Variable. We handle these types of decoding ourselves. def _disable_auto_decode_variable(var): """Disable automatic decoding on a netCDF4.Variable. We handle these types of decoding ourselves. """ var.set_auto_maskandscale(False) # only added in netCDF...
Simpler equivalent of pandas.core.common._maybe_promote Parameters ---------- dtype : np.dtype Returns ------- dtype : Promoted dtype that can hold missing values. fill_value : Valid missing value for the promoted dtype. def maybe_promote(dtype): """Simpler equivalent of pandas.core.c...
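A rough sketch of the promotion idea in plain numpy (an illustration, not the actual helper, which handles more dtype cases):

    import numpy as np

    def promote_for_missing(dtype):
        """Return a dtype able to hold missing values, plus a fill value for it (sketch)."""
        if np.issubdtype(dtype, np.integer) or np.issubdtype(dtype, np.bool_):
            return np.dtype(float), np.nan
        if np.issubdtype(dtype, np.datetime64):
            return dtype, np.datetime64('NaT')
        return dtype, np.nan

    promote_for_missing(np.dtype('int64'))  # (dtype('float64'), nan)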
Return an appropriate positive infinity for this dtype. Parameters ---------- dtype : np.dtype Returns ------- fill_value : positive infinity value corresponding to this dtype. def get_pos_infinity(dtype): """Return an appropriate positive infinity for this dtype. Parameters ----...
Return an appropriate negative infinity for this dtype. Parameters ---------- dtype : np.dtype Returns ------- fill_value : negative infinity value corresponding to this dtype. def get_neg_infinity(dtype): """Return an appropriate negative infinity for this dtype. Parameters ----...
Check if a dtype is a subclass of the numpy datetime types def is_datetime_like(dtype): """Check if a dtype is a subclass of the numpy datetime types """ return (np.issubdtype(dtype, np.datetime64) or np.issubdtype(dtype, np.timedelta64))
Like np.result_type, but with type promotion rules matching pandas. Examples of changed behavior: number + string -> object (not string) bytes + unicode -> object (not unicode) Parameters ---------- *arrays_and_dtypes : list of arrays and dtypes The dtype is extracted from both numpy a...
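A small illustration of the promotion rules stated above; the import path xarray.core.dtypes is an assumption about where this internal helper lives:

    import numpy as np
    from xarray.core.dtypes import result_type

    result_type(np.array([1]), np.array(['a']))     # dtype('O'), not a string dtype
    result_type(np.array([b'x']), np.array(['y']))  # dtype('O'), not unicode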
Parse ENVI metadata into Python data structures. See the link for information on the ENVI header file format: http://www.harrisgeospatial.com/docs/enviheaderfiles.html Parameters ---------- meta : dict Dictionary of keys and str values to parse, as returned by the rasterio tags(ns=...
Open a file with rasterio (experimental). This should work with any file that rasterio can open (most often: geoTIFF). The x and y coordinates are generated automatically from the file's geoinformation, shifted to the center of each pixel (see `"PixelIsArea" Raster Space <http://web.archive.org/web...
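Minimal usage sketch (the file path is hypothetical; requires the rasterio package):

    import xarray as xr

    da = xr.open_rasterio('example.tif')  # dims ('band', 'y', 'x'), coords from the geotransform
    band1 = da.sel(band=1)                # rasterio bands are 1-indexed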
Get indexer for rasterio array. Parameters ---------- key: tuple of int Returns ------- band_key: an indexer for the 1st dimension window: two tuples. Each consists of (start, stop). squeeze_axis: axes to be squeezed np_ind: indexer for loaded nump...
Group an array by its unique values. Parameters ---------- ar : array-like Input array. This will be flattened if it is not already 1-D. sort : boolean, optional Whether or not to sort unique values. Returns ------- values : np.ndarray Sorted, unique values as retur...
Consolidate adjacent slices in a list of slices. def _consolidate_slices(slices): """Consolidate adjacent slices in a list of slices. """ result = [] last_slice = slice(None) for slice_ in slices: if not isinstance(slice_, slice): raise ValueError('list element is not a slice: %...
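A standalone sketch of the consolidation idea (an illustrative reimplementation, not the actual private helper):

    def consolidate_slices(slices):
        """Merge runs of adjacent slices, e.g. slice(0, 3) + slice(3, 5) -> slice(0, 5)."""
        result = []
        for s in slices:
            if result and result[-1].stop == s.start:
                result[-1] = slice(result[-1].start, s.stop)
            else:
                result.append(s)
        return result

    consolidate_slices([slice(0, 3), slice(3, 5), slice(7, 9)])  # [slice(0, 5), slice(7, 9)]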
Like inverse_permutation, but also handles slices. Parameters ---------- positions : list of np.ndarray or slice objects. If slice objects, all are assumed to be slices. Returns ------- np.ndarray of indices or None, if no permutation is necessary. def _inverse_permutation_indices(pos...
(copied from pandas) if loffset is set, offset the result index This is NOT an idempotent routine, it will be applied exactly once to the result. Parameters ---------- result : Series or DataFrame the result of resample def _apply_loffset(grouper, result): """ (copied from pan...
Iterate over each element in this group def _iter_grouped(self): """Iterate over each element in this group""" for indices in self._group_indices: yield self._obj.isel(**{self._group_dim: indices})
Our index contained empty groups (e.g., from a resampling). If we reduced on that dimension, we want to restore the full index. def _maybe_restore_empty_groups(self, combined): """Our index contained empty groups (e.g., from a resampling). If we reduced on that dimension, we want to restore the...
This gets called if we are applying on an array with a multidimensional group. def _maybe_unstack(self, obj): """This gets called if we are applying on an array with a multidimensional group.""" if self._stacked_dim is not None and self._stacked_dim in obj.dims: obj = obj.un...
Return the first element of each group along the group dimension def first(self, skipna=None, keep_attrs=None): """Return the first element of each group along the group dimension """ return self._first_or_last(duck_array_ops.first, skipna, keep_attrs)
Return the last element of each group along the group dimension def last(self, skipna=None, keep_attrs=None): """Return the last element of each group along the group dimension """ return self._first_or_last(duck_array_ops.last, skipna, keep_attrs)
Fast version of `_iter_grouped` that yields Variables without metadata def _iter_grouped_shortcut(self): """Fast version of `_iter_grouped` that yields Variables without metadata """ var = self._obj.variable for indices in self._group_indices: yield var[{self...
Apply a function over each array in the group and concatenate them together into a new array. `func` is called like `func(ar, *args, **kwargs)` for each array `ar` in this group. Apply uses heuristics (like `pandas.GroupBy.apply`) to figure out how to stack together the array. ...
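Usage sketch of the public groupby-apply path described here (group labels are made up):

    import numpy as np
    import xarray as xr

    da = xr.DataArray(np.arange(6.0), dims='x',
                      coords={'letters': ('x', list('aabbcc'))})
    da.groupby('letters').apply(lambda g: g - g.mean())  # demean within each group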
Recombine the applied objects like the original. def _combine(self, applied, shortcut=False): """Recombine the applied objects like the original.""" applied_example, applied = peek_at(applied) coord, dim, positions = self._infer_concat_args(applied_example) if shortcut: comb...
Reduce the items in this group by applying `func` along some dimension(s). Parameters ---------- func : function Function which can be called in the form `func(x, axis=axis, **kwargs)` to return the result of collapsing an np.ndarray over an integer v...
Apply a function over each Dataset in the group and concatenate them together into a new Dataset. `func` is called like `func(ds, *args, **kwargs)` for each dataset `ds` in this group. Apply uses heuristics (like `pandas.GroupBy.apply`) to figure out how to stack together the d...
Recombine the applied objects like the original. def _combine(self, applied): """Recombine the applied objects like the original.""" applied_example, applied = peek_at(applied) coord, dim, positions = self._infer_concat_args(applied_example) combined = concat(applied, dim) combi...
Reduce the items in this group by applying `func` along some dimension(s). Parameters ---------- func : function Function which can be called in the form `func(x, axis=axis, **kwargs)` to return the result of collapsing an np.ndarray over an integer v...
Return attrs that are not in ignored_attrs def _filter_attrs(attrs, ignored_attrs): """ Return attrs that are not in ignored_attrs """ return dict((k, v) for k, v in attrs.items() if k not in ignored_attrs)
Convert a cdms2 variable into a DataArray def from_cdms2(variable): """Convert a cdms2 variable into a DataArray """ values = np.asarray(variable) name = variable.id dims = variable.getAxisIds() coords = {} for axis in variable.getAxisList(): coords[axis.id] = DataArray( ...
Convert a DataArray into a cdms2 variable def to_cdms2(dataarray, copy=True): """Convert a DataArray into a cdms2 variable """ # we don't want cdms2 to be a hard dependency import cdms2 def set_cdms2_attrs(var, attrs): for k, v in attrs.items(): setattr(var, k, v) # 1D axe...
Return attrs with keys in keys list def _pick_attrs(attrs, keys): """ Return attrs with keys in keys list """ return dict((k, v) for k, v in attrs.items() if k in keys)
Converts the xarray attrs into args that can be passed into Iris def _get_iris_args(attrs): """ Converts the xarray attrs into args that can be passed into Iris """ # iris.unit is deprecated in Iris v1.9 import cf_units args = {'attributes': _filter_attrs(attrs, iris_forbidden_keys)} args.updat...
Convert a DataArray into an Iris Cube def to_iris(dataarray): """ Convert a DataArray into an Iris Cube """ # Iris not a hard dependency import iris from iris.fileformats.netcdf import parse_cell_methods dim_coords = [] aux_coords = [] for coord_name in dataarray.coords: coord =...
Return a dictionary of attrs when given an Iris object def _iris_obj_to_attrs(obj): """ Return a dictionary of attrs when given an Iris object """ attrs = {'standard_name': obj.standard_name, 'long_name': obj.long_name} if obj.units.calendar: attrs['calendar'] = obj.units.calendar ...
Converts Iris cell methods into a string def _iris_cell_methods_to_str(cell_methods_obj): """ Converts Iris cell methods into a string """ cell_methods = [] for cell_method in cell_methods_obj: names = ''.join(['{}: '.format(n) for n in cell_method.coord_names]) intervals = ' '.join...
Mimics `iris_obj.name()` but with different name resolution order. Similar to iris_obj.name() method, but using iris_obj.var_name first to enable roundtripping. def _name(iris_obj, default='unknown'): """ Mimics `iris_obj.name()` but with different name resolution order. Similar to iris_obj.name() ...
Convert an Iris cube into a DataArray def from_iris(cube): """ Convert an Iris cube into a DataArray """ import iris.exceptions from xarray.core.pycompat import dask_array_type name = _name(cube) if name == 'unknown': name = None dims = [] for i in range(cube.ndim): try...
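A round-trip sketch through the public conversion methods (requires the iris package; the array contents and names are made up):

    import numpy as np
    import xarray as xr

    da = xr.DataArray(np.zeros((2, 3)), dims=('lat', 'lon'),
                      coords={'lat': [10.0, 20.0], 'lon': [0.0, 1.0, 2.0]}, name='tas')
    cube = da.to_iris()                          # DataArray -> iris.cube.Cube
    roundtripped = xr.DataArray.from_iris(cube)  # Cube -> DataArray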