text stringlengths 81 112k |
|---|
Returns a non-terminal unit rule from 'g', or None if there is none.
def get_any_nt_unit_rule(g):
    """Return some unit rule of 'g' (RHS is a single non-terminal), or None if none exist."""
    candidates = (
        rule for rule in g.rules
        if len(rule.rhs) == 1 and isinstance(rule.rhs[0], NT)
    )
    return next(candidates, None)
Removes 'rule' from 'g' without changing the language produced by 'g'.
def _remove_unit_rule(g, rule):
"""Removes 'rule' from 'g' without changing the langugage produced by 'g'."""
new_rules = [x for x in g.rules if x != rule]
refs = [x for x in g.rules if x.lhs == rule.rhs[0]]
new_rules += [build_uni... |
Splits a rule whose len(rhs) > 2 into shorter rules.
def _split(rule):
"""Splits a rule whose len(rhs) > 2 into shorter rules."""
rule_str = str(rule.lhs) + '__' + '_'.join(str(x) for x in rule.rhs)
rule_name = '__SP_%s' % (rule_str) + '_%d'
yield Rule(rule.lhs, [rule.rhs[0], NT(rule_name % 1)], weight... |
Applies the TERM rule on 'g' (see top comment).
def _term(g):
"""Applies the TERM rule on 'g' (see top comment)."""
all_t = {x for rule in g.rules for x in rule.rhs if isinstance(x, T)}
t_rules = {t: Rule(NT('__T_%s' % str(t)), [t], weight=0, alias='Term') for t in all_t}
new_rules = []
for rule in... |
Applies the BIN rule to 'g' (see top comment).
def _bin(g):
    """Applies the BIN rule to 'g' (see top comment)."""
    rewritten = []
    for rule in g.rules:
        # Rules with at most two RHS symbols pass through unchanged;
        # longer ones are split into a chain of binary rules.
        if len(rule.rhs) <= 2:
            rewritten.append(rule)
        else:
            rewritten.extend(_split(rule))
    return Grammar(rewritten)
Applies the UNIT rule to 'g' (see top comment).
def _unit(g):
    """Applies the UNIT rule to 'g' (see top comment): keep removing
    non-terminal unit rules until none remain."""
    while True:
        unit_rule = get_any_nt_unit_rule(g)
        if not unit_rule:
            break
        g = _remove_unit_rule(g, unit_rule)
    return g
Reverts a parse tree (RuleNode) to its original non-CNF form (Node).
def revert_cnf(node):
"""Reverts a parse tree (RuleNode) to its original non-CNF form (Node)."""
if isinstance(node, T):
return node
# Reverts TERM rule.
if node.rule.lhs.name.startswith('__T_'):
return node.children[0... |
Converts a lark rule, (lhs, rhs, callback, options), to a Rule.
def _to_rule(self, lark_rule):
"""Converts a lark rule, (lhs, rhs, callback, options), to a Rule."""
assert isinstance(lark_rule.origin, NT)
assert all(isinstance(x, Symbol) for x in lark_rule.expansion)
return Rule(
... |
Parses input, which is a list of tokens.
def parse(self, tokenized): # pylint: disable=invalid-name
"""Parses input, which is a list of tokens."""
table, trees = _parse(tokenized, self.grammar)
# Check if the parse succeeded.
if all(r.lhs != self.start for r in table[(0, len(tokenized)... |
Converts a RuleNode parse tree to a lark Tree.
def _to_tree(self, rule_node):
"""Converts a RuleNode parse tree to a lark Tree."""
orig_rule = self.orig_rules[rule_node.rule.alias]
children = []
for child in rule_node.children:
if isinstance(child, RuleNode):
... |
Creates a colorful image that represents the tree (data+children, without meta)
Possible values for `rankdir` are "TB", "LR", "BT", "RL", corresponding to
directed graphs drawn from top to bottom, from left to right, from bottom to
top, and from right to left, respectively. See:
https://www.graphviz.or... |
Expand (inline) children at the given indices
def expand_kids_by_index(self, *indices):
"Expand (inline) children at the given indices"
for i in sorted(indices, reverse=True): # reverse so that changing tail won't affect indices
kid = self.children[i]
self.children[i:i+1] = kid.... |
See Interpreter
def visit_children_decor(func):
    """Decorator: invoke the wrapped visitor method with the results of
    visiting the tree's children instead of the raw tree. See Interpreter."""
    @wraps(func)
    def wrapper(visitor, tree):
        return func(visitor, visitor.visit_children(tree))
    return wrapper
A convenience decorator factory, for modifying the behavior of user-supplied visitor methods
def v_args(inline=False, meta=False, tree=False):
"A convenience decorator factory, for modifying the behavior of user-supplied visitor methods"
if [tree, meta, inline].count(True) > 1:
raise ValueError("Visito... |
Given a parser instance and a dictionary mapping some label with
some malformed syntax examples, it'll return the label for the
example that best matches the current error.
def match_examples(self, parse_fn, examples):
""" Given a parser instance and a dictionary mapping some label wit... |
Consume a token and calculate the new line & column.
As an optional optimization, set test_newline=False if the token doesn't contain a newline.
def feed(self, token, test_newline=True):
"""Consume a token and calculate the new line & column.
As an optional optimization, set test_newline=False is... |
Create an instance of Lark with the grammar given by its filename
If rel_to is provided, the function will find the grammar filename in relation to it.
Example:
>>> Lark.open("grammar_file.lark", rel_to=__file__, parser="lalr")
Lark(...)
def open(cls, grammar_filename, rel_to... |
Only lex (and postlex) the text, without parsing it. Only relevant when lexer='standard'
def lex(self, text):
"Only lex (and postlex) the text, without parsing it. Only relevant when lexer='standard'"
if not hasattr(self, 'lexer'):
self.lexer = self._build_lexer()
stream = self.lexer... |
Calculate FOLLOW sets.
Adapted from: http://lara.epfl.ch/w/cc09:algorithm_for_first_and_follow_sets
def calculate_sets(rules):
"""Calculate FOLLOW sets.
Adapted from: http://lara.epfl.ch/w/cc09:algorithm_for_first_and_follow_sets"""
symbols = {sym for rule in rules for sym in rule.expansion} | {rule.... |
Returns all init_ptrs accessible by rule (recursive)
def expand_rule(self, rule):
"Returns all init_ptrs accessible by rule (recursive)"
init_ptrs = set()
def _expand_rule(rule):
assert not rule.is_term, rule
for r in self.rules_by_origin[rule]:
init_ptr... |
Returns all rules and terminals of grammar, prepended
with a 'namespace' prefix, except for those which are aliased.
def import_from_grammar_into_namespace(grammar, namespace, aliases):
"""Returns all rules and terminals of grammar, prepended
with a 'namespace' prefix, except for those which are aliased.
... |
Parse grammar_text, verify, and create Grammar object. Display nice messages on error.
def load_grammar(self, grammar_text, grammar_name='<?>'):
"Parse grammar_text, verify, and create Grammar object. Display nice messages on error."
try:
tree = self.canonize_tree.transform( self.parser.pa... |
The core Earley Predictor and Completer.
At each stage of the input, we handling any completed items (things
that matched on the last cycle) and use those to predict what should
come next in the input stream. The completions and any predicted
non-terminals are recursively processed unti... |
Conditionally disabled/enables form fields based on the current
section in the radio group
def applyStyleRules(self):
"""
Conditionally disabled/enables form fields based on the current
section in the radio group
"""
for button, widget in zip(self.radioButtons, sel... |
Checkboxes are hidden when inside of a RadioGroup as a selection of
the Radio button is an implicit selection of the Checkbox. As such, we have
to manually "check" any checkbox as needed.
def handleImplicitCheck(self):
"""
Checkboxes are hidden when inside of a RadioGroup as a selec... |
Instantiate the Gooey Widgets that are used within the RadioGroup
def createWidgets(self):
"""
Instantiate the Gooey Widgets that are used within the RadioGroup
"""
from gooey.gui.components import widgets
return [getattr(widgets, item['type'])(self, item)
... |
Verify user input and kick off the client's program if valid
def onStart(self, *args, **kwarg):
"""
Verify user input and kick off the client's program if valid
"""
with transactUI(self):
config = self.navbar.getActiveConfig()
config.resetErrors()
... |
Return the user to the settings screen for further editing
def onEdit(self):
    """Return the user to the settings screen for further editing"""
    with transactUI(self):
        # Optionally re-seed the UI from the client program before showing it.
        wantsFreshSeeds = self.buildSpec['poll_external_updates']
        if wantsFreshSeeds:
            self.fetchExternalUpdates()
        self.showSettings()
Collect all of the required information from the config screen and
build a CLI string which can be used to invoke the client program
def buildCliString(self):
"""
Collect all of the required information from the config screen and
build a CLI string which can be used to invoke the cl... |
Display the appropriate screen based on the success/fail of the
host program
def onComplete(self, *args, **kwargs):
"""
Display the appropriate screen based on the success/fail of the
host program
"""
with transactUI(self):
if self.clientRunner.was_suc... |
!Experimental!
Calls out to the client code requesting seed values to use in the UI
!Experimental!
def fetchExternalUpdates(self):
"""
!Experimental!
Calls out to the client code requesting seed values to use in the UI
!Experimental!
"""
seeds = s... |
Chooses the appropriate layout navigation component based on user prefs
def buildNavigation(self):
"""
Chooses the appropriate layout navigation component based on user prefs
"""
if self.buildSpec['navigation'] == constants.TABBED:
navigation = Tabbar(self, self.buildSp... |
Choose the best font face available given the user options
def getFontFace(self):
"""Choose the best font face available given the user options"""
userFace = self.buildSpec['terminal_font_family'] or self.defaultFont.GetFaceName()
return (''
if self.buildSpec['monospace_disp... |
Reads the stdout of `process` and forwards lines and progress
to any interested subscribers
def _forward_stdout(self, process):
'''
Reads the stdout of `process` and forwards lines and progress
to any interested subscribers
'''
while True:
line = proce... |
Finds progress information in the text using the
user-supplied regex and calculation instructions
def _extract_progress(self, text):
'''
Finds progress information in the text using the
user-supplied regex and calculation instructions
'''
# monad-ish dispatch to av... |
Calculates the final progress value found by the regex
def _calculate_progress(self, match):
'''
Calculates the final progress value found by the regex
'''
if not self.progress_expr:
return safe_float(match.group(1))
else:
return self._eval_progres... |
Runs the user-supplied progress calculation rule
def _eval_progress(self, match):
'''
Runs the user-supplied progress calculation rule
'''
_locals = {k: safe_float(v) for k, v in match.groupdict().items()}
if "x" not in _locals:
_locals["x"] = [safe_float(x) fo... |
Hide/show configuration panels based on the currently selected
option in the sidebar
def swapConfigPanels(self, event):
"""Hide/show configuration panels based on the currently selected
option in the sidebar """
for id, panel in enumerate(self.configPanels):
panel.Hid... |
value, disable_animation=False
:param args:
:param kwargs:
:return:
def updateProgressBar(self, *args, **kwargs):
'''
value, disable_animation=False
:param args:
:param kwargs:
:return:
'''
value = kwargs.get('progress')
... |
Decorator for client code's main function.
Serializes argparse data to JSON for use with the Gooey front end
def Gooey(f=None,
advanced=True,
language='english',
auto_start=False, # TODO: add this to the docs. Used to be `show_config=True`
target=None,
program... |
Sends a gooey-seed-ui request to the client program to retrieve
dynamically generated defaults with which to seed the UI
def fetchDynamicProperties(target, encoding):
"""
Sends a gooey-seed-ui request to the client program it retrieve
dynamically generated defaults with which to seed the UI
""... |
Messily builds the (potentially) nested and grouped layout
Note! Mutates `self.reifiedWidgets` in place with the widgets as they're
instantiated! I cannot figure out how to split out the creation of the
widgets from their styling without WxPython violently exploding
TODO: sort ou... |
chunk the widgets up into groups based on their sizing hints
def chunkWidgets(self, group):
''' chunk the widgets up into groups based on their sizing hints '''
ui_groups = []
subgroup = []
for index, item in enumerate(group['items']):
if getin(item, ['options', 'full_w... |
Convert a JSON description of a widget into a WxObject
def reifyWidget(self, parent, item):
    """Convert a JSON description of a widget into a WxObject."""
    from gooey.gui.components import widgets
    # The item's 'type' field names the widget class to instantiate.
    constructor = getattr(widgets, item['type'])
    return constructor(parent, item)
Recursively extract argument groups and associated actions
from ParserGroup objects
def extract_groups(action_group):
'''
Recursively extract argument groups and associated actions
from ParserGroup objects
'''
return {
'name': action_group.title,
'description': action_gr... |
_actions possessing the `required` flag and not implicitly optional
through `nargs` being '*' or '?'
def is_required(action):
'''
_actions possessing the `required` flag and not implicitly optional
through `nargs` being '*' or '?'
'''
return not isinstance(action, _SubParsersAction) and (... |
actions which are general "store" instructions.
e.g. anything which has an argument style like:
$ script.py -f myfilename.txt
def is_standard(action):
""" actions which are general "store" instructions.
e.g. anything which has an argument style like:
$ script.py -f myfilename.txt
""... |
_actions which are either storeconst, store_bool, etc..
def is_flag(action):
    """_actions which are either storeconst, store_bool, etc..

    Returns True for "flag" style actions that take no explicit value
    on the command line (store_true / store_false / store_const).
    """
    # isinstance accepts a tuple of types directly; the original
    # any(list(map(lambda ...))) construction was equivalent but needlessly
    # convoluted (and materialized an intermediate list for no reason).
    return isinstance(action, (_StoreTrueAction, _StoreFalseAction, _StoreConstAction))
Returns
str(option_string * DropDown Value)
e.g.
-vvvvv
def counter(metatdata, value):
'''
Returns
str(option_string * DropDown Value)
e.g.
-vvvvv
'''
if not str(value).isdigit():
return None
arg = str(metatdata['commands'][0]).replace('-', ''... |
Open and return the supplied json file
def load(language_dir, filename, encoding):
''' Open and return the supplied json file '''
global _DICTIONARY
try:
json_file = filename + '.json'
with io.open(os.path.join(language_dir, json_file), 'r', encoding=encoding) as f:
_DICTIONARY = json.load(f)... |
returns the value in a nested dict
def getin(m, path, default=None):
"""returns the value in a nested dict"""
keynotfound = ':com.gooey-project/not-found'
result = reduce(lambda acc, val: acc.get(val, {keynotfound: None}), path, m)
# falsey values like 0 would incorrectly trigger the default to be ... |
Copy-on-write associates a value in a dict
def assoc(m, key, val):
    """Copy-on-write: return a deep copy of `m` with `key` bound to `val`.

    The input mapping is left untouched.
    """
    updated = deepcopy(m)
    updated[key] = val
    return updated
Copy-on-write associates a value in a nested dict
def associn(m, path, value):
""" Copy-on-write associates a value in a nested dict """
def assoc_recursively(m, path, value):
if not path:
return value
p = path[0]
return assoc(m, p, assoc_recursively(m.get(p,{}), path[... |
Merge all maps left to right
def merge(*maps):
    """Merge all maps left to right.

    Args:
        *maps: zero or more dicts; keys in later maps override earlier ones.

    Returns:
        A new dict. Inputs are deep-copied first, so the result shares no
        mutable state with the arguments. With no arguments, returns {}.
    """
    copies = map(deepcopy, maps)
    # Seed reduce with a fresh dict: without an initializer, merge() with
    # zero maps raised TypeError ("reduce() of empty sequence"). Seeding
    # also guarantees the first argument is never mutated in place.
    return reduce(lambda acc, nxt: acc.update(nxt) or acc, copies, {})
Return first occurrence matching f, otherwise None
def findfirst(f, coll):
    """Return first occurrence matching f, otherwise None.

    Bug fix: the previous implementation used itertools.dropwhile(f, coll),
    which drops elements *while* the predicate is truthy and therefore
    returned the first element NOT matching `f` -- the opposite of the
    documented contract. filter/next returns the first match lazily.
    """
    return next(filter(f, coll), None)
Generates the `input_tensor` that minimizes the weighted `losses`. This function is intended for advanced
use cases where a custom loss is desired.
Args:
input_tensor: An input tensor of shape: `(samples, channels, image_dims...)` if `image_data_format=
channels_first` or `(samples, image_d... |
Generates the model input that maximizes the output of all `filter_indices` in the given `layer_idx`.
Args:
model: The `keras.models.Model` instance. The model input shape must be: `(samples, channels, image_dims...)`
if `image_data_format=channels_first` or `(samples, image_dims..., channels)`... |
Uses RMSProp to compute step from gradients.
Args:
grads: numpy array of gradients.
cache: numpy array of same shape as `grads` as RMSProp cache
decay_rate: How fast to decay cache
Returns:
A tuple of
step: numpy array of the same shape a... |
Creates a random `seed_input` if None. Otherwise:
- Ensures batch_size dim on provided `seed_input`.
- Shuffle axis according to expected `image_data_format`.
def _get_seed_input(self, seed_input):
"""Creates a random `seed_input` if None. Otherwise:
- Ensures batch_size dim... |
Performs gradient descent on the input image with respect to defined losses.
Args:
seed_input: An N-dim numpy array of shape: `(samples, channels, image_dims...)` if `image_data_format=
channels_first` or `(samples, image_dims..., channels)` if `image_data_format=channels_last`.
... |
Orders the set of `objs` by `line_nos`
def order_by_line_nos(objs, line_nos):
    """Orders the set of `objs` by `line_nos`.

    `objs[i]` is assumed to correspond to `line_nos[i]`; the objects are
    returned sorted by ascending line number (stable for ties).
    """
    ranked_indices = sorted(range(len(line_nos)), key=lambda i: line_nos[i])
    return [objs[i] for i in ranked_indices]
Import a module path and create an api doc from it
Args:
string (str): string with line breaks to write to file.
filename (str): filename without the .md
out_path (str): The output directory
def to_md_file(string, filename, out_path="."):
"""Import a module path and create an api doc f... |
Creates a src path string with line info for use as markdown link.
def get_src_path(self, obj, append_base=True):
"""Creates a src path string with line info for use as markdown link.
"""
path = getsourcefile(obj)
if self.src_root not in path:
# this can happen with e.g.
... |
Parse docstring (parsed with getdoc) according to Google-style
formatting and convert to markdown. We support the following
Google style syntax:
Args, Kwargs:
argname (type): text
freeform text
Returns, Yields:
retname (type): text
freefor... |
Takes a function (or method) and documents it.
Args:
clsname (str, optional): class name to prepend to funcname.
depth (int, optional): number of ### to append to function name
def func2md(self, func, clsname=None, names=None, depth=3):
"""Takes a function (or method) and docum... |
Takes a class and creates markdown text to document its methods and variables.
def class2md(self, cls, depth=2):
"""Takes a class and creates markdown text to document its methods and variables.
"""
section = "#" * depth
subsection = "#" * (depth + 2)
clsname = cls.__name__
... |
Takes an imported module object and create a Markdown string containing functions and classes.
def module2md(self, module):
"""Takes an imported module object and create a Markdown string containing functions and classes.
"""
modname = module.__name__
path = self.get_src_path(module, ap... |
Searches for the nearest penultimate `Conv` or `Pooling` layer.
Args:
model: The `keras.models.Model` instance.
layer_idx: The layer index within `model.layers`.
penultimate_layer_idx: The pre-layer to `layer_idx`. If set to None, the nearest penultimate
`Conv` or `Pooling` laye... |
Generates an attention heatmap over the `seed_input` by using positive gradients of `input_tensor`
with respect to weighted `losses`.
This function is intended for advanced use cases where a custom loss is desired. For common use cases,
refer to `visualize_class_saliency` or `visualize_regression_saliency`... |
Generates an attention heatmap over the `seed_input` for maximizing `filter_indices`
output in the given `layer_idx`.
Args:
model: The `keras.models.Model` instance. The model input shape must be: `(samples, channels, image_dims...)`
if `image_data_format=channels_first` or `(samples, image... |
Generates a gradient based class activation map (CAM) by using positive gradients of `input_tensor`
with respect to weighted `losses`.
For details on grad-CAM, see the paper:
[Grad-CAM: Why did you say that? Visual Explanations from Deep Networks via Gradient-based Localization]
(https://arxiv.org/pdf/... |
Generates a gradient based class activation map (grad-CAM) that maximizes the outputs of
`filter_indices` in `layer_idx`.
Args:
model: The `keras.models.Model` instance. The model input shape must be: `(samples, channels, image_dims...)`
if `image_data_format=channels_first` or `(samples, i... |
Creates a copy of model by modifying all activations to use a custom op to modify the backprop behavior.
Args:
model: The `keras.models.Model` instance.
backprop_modifier: One of `{'guided', 'rectified'}`
Returns:
A copy of model with modified activations for backwards pass.
def modi... |
Normalizes the `output_tensor` with respect to `input_tensor` dimensions.
This makes regularizer weight factor more or less uniform across various input image dimensions.
Args:
input_tensor: An tensor of shape: `(samples, channels, image_dims...)` if `image_data_format=
channels_first` ... |
r"""Implements the N-dim version of function
$$TV^{\beta}(x) = \sum_{whc} \left ( \left ( x(h, w+1, c) - x(h, w, c) \right )^{2} +
\left ( x(h+1, w, c) - x(h, w, c) \right )^{2} \right )^{\frac{\beta}{2}}$$
to return total variation for all images in the batch.
def build_loss(self):
r""... |
Utility to find font file.
def _find_font_file(query):
    """Utility to find font file.

    Returns all system font paths whose basename contains `query`
    (case-insensitive).
    """
    needle = query.lower()
    return [path for path in fontman.findSystemFonts()
            if needle in os.path.basename(path).lower()]
Updates `kwargs` with dict of `defaults`
Args:
defaults: A dictionary of keys and values
**kwargs: The kwargs to update.
Returns:
The updated kwargs.
def add_defaults_to_kwargs(defaults, **kwargs):
"""Updates `kwargs` with dict of `defaults`
Args:
defaults: A dictiona... |
Helper utility to retrieve the callable function associated with a string identifier.
Args:
identifier: The identifier. Could be a string or function.
module_globals: The global objects of the module.
module_name: The module name
Returns:
The callable associated with the identi... |
Applies modifications to the model layers to create a new Graph. For example, simply changing
`model.layers[idx].activation = new activation` does not change the graph. The entire graph needs to be updated
with modified inbound and outbound tensors because of change in layer building function.
Args:
... |
Creates a uniformly distributed random array with the given `mean` and `std`.
Args:
shape: The desired shape
mean: The desired mean (Default value = 128)
std: The desired std (Default value = 20)
Returns: Random numpy array of given `shape` uniformly distributed with desired `mean` and... |
Looks up the layer index corresponding to `layer_name` from `model`.
Args:
model: The `keras.models.Model` instance.
layer_name: The name of the layer to lookup.
Returns:
The layer index if found. Raises an exception otherwise.
def find_layer_idx(model, layer_name):
"""Looks up th... |
Utility function to scale the `input_array` to `input_range` throwing away high frequency artifacts.
Args:
input_array: An N-dim numpy array.
input_range: Specifies the input range as a `(min, max)` tuple to rescale the `input_array`.
Returns:
The rescaled `input_array`.
def deprocess... |
Utility function to stitch images together with a `margin`.
Args:
images: The array of 2D images to stitch.
margin: The black border margin size between images (Default value = 5)
cols: Max number of image cols. New row is created when number of images exceed the column size.
(D... |
Returns image shape in a backend agnostic manner.
Args:
img: An image tensor of shape: `(channels, image_dims...)` if data_format='channels_first' or
`(image_dims..., channels)` if data_format='channels_last'.
Returns:
Tuple containing image shape information in `(samples, channels... |
Utility function to load an image from disk.
Args:
path: The image file path.
grayscale: True to convert to grayscale image (Default value = False)
target_size: (w, h) to resize. (Default value = None)
Returns:
The loaded numpy image.
def load_img(path, grayscale=False, target_size=... |
Utility function to return the image net label for the final `dense` layer output index.
Args:
indices: Could be a single value or an array of indices whose labels should be looked up.
Returns:
Image net label corresponding to the image category.
def lookup_imagenet_labels(indices):
"""Ut... |
Draws text over the image. Requires PIL.
Args:
img: The image to use.
text: The text string to overlay.
position: The text (x, y) position. (Default value = (10, 10))
font: The ttf or open type font to use. (Default value = 'FreeSans.ttf')
font_size: The text font size. (Def... |
Normalizes the numpy array to (min_value, max_value)
Args:
array: The numpy array
min_value: The min value in normalized array (Default value = 0)
max_value: The max value in normalized array (Default value = 1)
Returns:
The array normalized to range between (min_value, max_val... |
Determines the number of filters within the given `layer`.
Args:
layer: The keras layer to use.
Returns:
Total number of filters within `layer`.
For `keras.layers.Dense` layer, this is the total number of outputs.
def get_num_filters(layer):
"""Determines the number of filters wit... |
Overlays `array1` onto `array2` with `alpha` blending.
Args:
array1: The first numpy array.
array2: The second numpy array.
alpha: The alpha value of `array1` as overlayed onto `array2`. This value needs to be between [0, 1],
with 0 being `array2` only to 1 being `array1` only (... |
All the logic for creating a new DataArray
def _infer_coords_and_dims(shape, coords, dims):
"""All the logic for creating a new DataArray"""
if (coords is not None and not utils.is_dict_like(coords) and
len(coords) != len(shape)):
raise ValueError('coords is not dict-like, but it has %s it... |
Convert a DataArray to a Dataset.
Parameters
----------
dim : str, optional
Name of the dimension on this array along which to split this array
into separate variables. If not provided, this array is converted
into a Dataset of one variable.
name : st... |
Return a mapping of all MultiIndex levels and their corresponding
coordinate name.
def _level_coords(self):
"""Return a mapping of all MultiIndex levels and their corresponding
coordinate name.
"""
level_coords = OrderedDict()
for cname, var in self._coords.items():
... |
Mapping of pandas.Index objects used for label based indexing
def indexes(self):
    """Mapping of pandas.Index objects used for label based indexing.

    The underlying index mapping is computed lazily on first access and
    cached on the instance.
    """
    cached = self._indexes
    if cached is None:
        cached = default_indexes(self._coords, self.dims)
        self._indexes = cached
    return Indexes(cached)
Given names of coordinates, reset them to become variables.
Parameters
----------
names : str or list of str, optional
Name(s) of non-index coordinates in this dataset to reset into
variables. By default, all non-index coordinates are reset.
drop : bool, optional... |
Manually trigger loading of this array's data from disk or a
remote source into memory and return this array.
Normally, it should not be necessary to call this method in user code,
because all xarray functions should either work on deferred data or
load data automatically. However, this... |
Trigger computation in constituent dask arrays
This keeps them as dask arrays but encourages them to keep data in
memory. This is particularly useful when on a distributed machine.
When on a single machine consider using ``.compute()`` instead.
Parameters
----------
**... |
Returns a copy of this array.
If `deep=True`, a deep copy is made of the data array.
Otherwise, a shallow copy is made, so each variable in the new
array's dataset is also a variable in this array's dataset.
Use `data` to create a new object with the same structure as
original ... |
Coerce this array's data into a dask arrays with the given chunks.
If this variable is a non-dask array, it will be converted to dask
array. If it's a dask array, it will be rechunked to the given chunk
sizes.
If neither chunks is not provided for one or more dimensions, chunk
... |
Return a new DataArray whose dataset is given by integer indexing
along the specified dimension(s).
See Also
--------
Dataset.isel
DataArray.sel
def isel(self, indexers=None, drop=False, **indexers_kwargs):
"""Return a new DataArray whose dataset is given by integer ind... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.