Save this instance to a json file.
def to_json_file(self, json_file_path):
""" Save this instance to a json file."""
with open(json_file_path, "w", encoding='utf-8') as writer:
writer.write(self.to_json_string()) |
Initialize the weights.
def init_weights(self, module):
""" Initialize the weights.
"""
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/... |
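The row is truncated, but the standard continuation of a BERT-style initializer is short. Below is a minimal sketch, assuming the config exposes an `initializer_range` attribute (the convention in this codebase); treat the exact body as an assumption, not the verbatim source:

import torch.nn as nn

def init_weights(self, module):
    """ Initialize the weights (sketch of the usual continuation). """
    if isinstance(module, (nn.Linear, nn.Embedding)):
        # PyTorch lacks a built-in truncated_normal_, so a plain normal
        # distribution is used instead of the TF initializer.
        module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
    elif isinstance(module, nn.LayerNorm):
        module.bias.data.zero_()
        module.weight.data.fill_(1.0)
    if isinstance(module, nn.Linear) and module.bias is not None:
        module.bias.data.zero_()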
Instantiate a GPT2PreTrainedModel from a pre-trained model file or a pytorch state dict.
Download and cache the pre-trained model file if needed.
Params:
pretrained_model_name_or_path: either:
        - a str with the name of a pre-trained model to load, selected from the list of:
... |
Loads a data file into a list of `InputFeature`s.
def convert_examples_to_features(examples, seq_length, tokenizer):
"""Loads a data file into a list of `InputFeature`s."""
features = []
for (ex_index, example) in enumerate(examples):
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b... |
Read a list of `InputExample`s from an input file.
def read_examples(input_file):
"""Read a list of `InputExample`s from an input file."""
examples = []
unique_id = 0
with open(input_file, "r", encoding='utf-8') as reader:
while True:
line = reader.readline()
if not line... |
Read a SQuAD json file into a list of SquadExample.
def read_squad_examples(input_file, is_training, version_2_with_negative):
"""Read a SQuAD json file into a list of SquadExample."""
with open(input_file, "r", encoding='utf-8') as reader:
input_data = json.load(reader)["data"]
def is_whitespace(... |
Loads a data file into a list of `InputBatch`s.
def convert_examples_to_features(examples, tokenizer, max_seq_length,
doc_stride, max_query_length, is_training):
"""Loads a data file into a list of `InputBatch`s."""
unique_id = 1000000000
features = []
for (example_in... |
Returns tokenized answer spans that better match the annotated answer.
def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer,
orig_answer_text):
"""Returns tokenized answer spans that better match the annotated answer."""
# The SQuAD annotations are character based. W... |
Check if this is the 'max context' doc span for the token.
def _check_is_max_context(doc_spans, cur_span_index, position):
"""Check if this is the 'max context' doc span for the token."""
# Because of the sliding window approach taken to scoring documents, a single
# token can appear in multiple documents... |
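Since this row is cut off, here is a minimal sketch of the scoring rule this helper conventionally applies; the `start`/`length` span fields follow the usual DocSpan namedtuple layout and are assumptions here:

def check_is_max_context(doc_spans, cur_span_index, position):
    """ Sketch: a token's 'max context' span is the one maximizing
    min(left context, right context) + 0.01 * span length. """
    best_score, best_span_index = None, None
    for span_index, doc_span in enumerate(doc_spans):
        end = doc_span.start + doc_span.length - 1
        if position < doc_span.start or position > end:
            continue
        num_left_context = position - doc_span.start
        num_right_context = end - position
        score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
        if best_score is None or score > best_score:
            best_score = score
            best_span_index = span_index
    return cur_span_index == best_span_index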
Write final predictions to the json file and log-odds of null if needed.
def write_predictions(all_examples, all_features, all_results, n_best_size,
max_answer_length, do_lower_case, output_prediction_file,
output_nbest_file, output_null_log_odds_file, verbose_logging,
... |
Project the tokenized prediction back to the original text.
def get_final_text(pred_text, orig_text, do_lower_case, verbose_logging=False):
"""Project the tokenized prediction back to the original text."""
# When we created the data, we kept track of the alignment between original
# (whitespace tokenized)... |
Get the n-best logits from a list.
def _get_best_indexes(logits, n_best_size):
"""Get the n-best logits from a list."""
index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)
best_indexes = []
for i in range(len(index_and_score)):
if i >= n_best_size:
break
... |
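The loop is truncated mid-row; an equivalent, self-contained version of the same idea (take the indexes of the n_best_size largest logits) can be sketched as:

def get_best_indexes(logits, n_best_size):
    """ Return the indexes of the n_best_size largest logits (sketch). """
    index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)
    return [index for index, _ in index_and_score[:n_best_size]]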
Compute softmax probability over raw logits.
def _compute_softmax(scores):
"""Compute softmax probability over raw logits."""
if not scores:
return []
max_score = None
for score in scores:
if max_score is None or score > max_score:
max_score = score
exp_scores = []
... |
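The body is cut off after the max-score scan; a complete, numerically stable version of the same routine looks like this (a sketch, not the verbatim continuation):

import math

def compute_softmax(scores):
    """ Compute softmax probabilities over raw logits (stable sketch). """
    if not scores:
        return []
    max_score = max(scores)
    # Shift by the max before exponentiating so large logits cannot overflow.
    exp_scores = [math.exp(s - max_score) for s in scores]
    total = sum(exp_scores)
    return [e / total for e in exp_scores]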
Loads a data file into a list of `InputBatch`s.
def convert_examples_to_features(examples, tokenizer, max_seq_length,
is_training):
"""Loads a data file into a list of `InputBatch`s."""
# Swag is a multiple choice task. To perform this task using Bert,
# we will use the fo... |
Loads a data file into a list of `InputBatch`s.
def convert_examples_to_features(examples, label_list, max_seq_length,
tokenizer, output_mode):
"""Loads a data file into a list of `InputBatch`s."""
label_map = {label : i for i, label in enumerate(label_list)}
features = [... |
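These converters conventionally rely on a small helper to fit sentence pairs into max_seq_length. A sketch of that helper (`_truncate_seq_pair` is its usual name in these scripts, assumed here):

def _truncate_seq_pair(tokens_a, tokens_b, max_length):
    """ Trim the longer sequence one token at a time so truncation
    is spread as evenly as possible across both sequences. """
    while len(tokens_a) + len(tokens_b) > max_length:
        if len(tokens_a) > len(tokens_b):
            tokens_a.pop()
        else:
            tokens_b.pop()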
Reads a tab-separated values file.
def _read_tsv(cls, input_file, quotechar=None):
    """Reads a tab-separated values file."""
with open(input_file, "r", encoding="utf-8") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
... |
See base class.
def get_train_examples(self, data_dir):
"""See base class."""
logger.info("LOOKING AT {}".format(os.path.join(data_dir, "train.tsv")))
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") |
Creates examples for the training and dev sets.
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
... |
See base class.
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") |
See base class.
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")),
"dev_matched") |
Masks everything but the k top entries as -infinity (-1e10).
Used to mask logits such that e^-infinity -> 0 won't contribute to the
sum of the denominator.
def top_k_logits(logits, k):
"""
    Masks everything but the k top entries as -infinity (-1e10).
Used to mask logits such that e^-infinity -> 0 won'... |
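A minimal sketch of the masking described above, assuming PyTorch tensors of shape [batch, vocab]:

import torch

def top_k_logits(logits, k):
    """ Keep the k largest logits per row; push the rest toward -infinity (sketch). """
    if k == 0:
        return logits
    values, _ = torch.topk(logits, k)
    min_values = values[:, -1].unsqueeze(-1)
    # exp(-1e10) underflows to 0, so masked entries drop out of the
    # softmax denominator.
    return torch.where(logits < min_values,
                       torch.full_like(logits, -1e10),
                       logits)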
Load tf checkpoints in a pytorch model
def load_tf_weights_in_bert(model, tf_checkpoint_path):
""" Load tf checkpoints in a pytorch model
"""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
print("Loading a TensorFlow models in PyTorch, requ... |
Instantiate a BertPreTrainedModel from a pre-trained model file or a pytorch state dict.
Download and cache the pre-trained model file if needed.
Params:
pretrained_model_name_or_path: either:
        - a str with the name of a pre-trained model to load, selected from the list of:
... |
Load tf pre-trained weights in a pytorch model (from NumPy arrays here)
def load_tf_weights_in_openai_gpt(model, openai_checkpoint_folder_path):
""" Load tf pre-trained weights in a pytorch model (from NumPy arrays here)
"""
import re
import numpy as np
print("Loading weights...")
names = json.... |
Constructs a `OpenAIGPTConfig` from a Python dictionary of parameters.
def from_dict(cls, json_object):
"""Constructs a `OpenAIGPTConfig` from a Python dictionary of parameters."""
config = OpenAIGPTConfig(vocab_size_or_config_json_file=-1)
for key, value in json_object.items():
con... |
Update input embeddings with new embedding matrix if needed
def set_num_special_tokens(self, num_special_tokens):
    " Update input embeddings with new embedding matrix if needed "
if self.config.n_special == num_special_tokens:
return
# Update config
self.config.n_special = ... |
Update input and output embeddings with new embedding matrix
Make sure we are sharing the embeddings
def set_num_special_tokens(self, num_special_tokens):
    """ Update input and output embeddings with new embedding matrix
        Make sure we are sharing the embeddings
"""
self.... |
Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A clo... |
:param step: which of t_total steps we're on
:param nowarn: set to True to suppress warning regarding training beyond specified 't_total' steps
:return: learning rate multiplier for current update
def get_lr(self, step, nowarn=False):
"""
:param step: which of t_total step... |
Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A clo... |
Runs basic whitespace cleaning and splitting on a piece of text.
def whitespace_tokenize(text):
"""Runs basic whitespace cleaning and splitting on a piece of text."""
text = text.strip()
if not text:
return []
tokens = text.split()
return tokens |
Checks whether `char` is a punctuation character.
def _is_punctuation(char):
    """Checks whether `char` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat... |
Converts a sequence of tokens into ids using the vocab.
def convert_tokens_to_ids(self, tokens):
"""Converts a sequence of tokens into ids using the vocab."""
ids = []
for token in tokens:
ids.append(self.vocab[token])
if len(ids) > self.max_len:
logger.warning(
... |
Converts a sequence of ids in wordpiece tokens using the vocab.
def convert_ids_to_tokens(self, ids):
"""Converts a sequence of ids in wordpiece tokens using the vocab."""
tokens = []
for i in ids:
tokens.append(self.ids_to_tokens[i])
return tokens |
Save the tokenizer vocabulary to a directory or file.
def save_vocabulary(self, vocab_path):
"""Save the tokenizer vocabulary to a directory or file."""
index = 0
if os.path.isdir(vocab_path):
vocab_file = os.path.join(vocab_path, VOCAB_NAME)
with open(vocab_file, "w", encod... |
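The row breaks off inside the `open` call; the remainder is conventionally a loop writing one token per line in index order. A standalone sketch (the plain-dict vocab and "vocab.txt" file name are assumptions):

import os

def save_vocabulary(vocab, vocab_path, vocab_name="vocab.txt"):
    """ Write one token per line, ordered by index (sketch). """
    vocab_file = vocab_path
    if os.path.isdir(vocab_path):
        vocab_file = os.path.join(vocab_path, vocab_name)
    with open(vocab_file, "w", encoding="utf-8") as writer:
        # Sort by index so that line number == token id on reload.
        for token, _ in sorted(vocab.items(), key=lambda kv: kv[1]):
            writer.write(token + "\n")
    return vocab_file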
Instantiate a PreTrainedBertModel from a pre-trained model file.
Download and cache the pre-trained model file if needed.
def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs):
"""
Instantiate a PreTrainedBertModel from a pre-trained model file.
Down... |
Tokenizes a piece of text.
def tokenize(self, text):
"""Tokenizes a piece of text."""
text = self._clean_text(text)
# This was added on November 1st, 2018 for the multilingual and Chinese
# models. This is also applied to the English models now, but it doesn't
# matter since the... |
Strips accents from a piece of text.
def _run_strip_accents(self, text):
"""Strips accents from a piece of text."""
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
... |
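The loop is truncated; an equivalent complete version of the NFD-based accent stripper (a sketch) is:

import unicodedata

def run_strip_accents(text):
    """ Strip accents by dropping combining marks ("Mn") after NFD normalization. """
    text = unicodedata.normalize("NFD", text)
    return "".join(ch for ch in text if unicodedata.category(ch) != "Mn")

For example, run_strip_accents("café") returns "cafe".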
Adds whitespace around any CJK character.
def _tokenize_chinese_chars(self, text):
"""Adds whitespace around any CJK character."""
output = []
for char in text:
cp = ord(char)
if self._is_chinese_char(cp):
output.append(" ")
output.append(... |
Checks whether CP is the codepoint of a CJK character.
def _is_chinese_char(self, cp):
"""Checks whether CP is the codepoint of a CJK character."""
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)... |
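Since the row is truncated, here is a sketch of the range check against the CJK Unified Ideographs blocks; the exact list below is the one commonly used for this purpose and should be treated as an assumption:

def is_chinese_char(cp):
    """ True if the codepoint falls in a CJK Unified Ideographs block (sketch). """
    return ((0x4E00 <= cp <= 0x9FFF) or (0x3400 <= cp <= 0x4DBF) or
            (0x20000 <= cp <= 0x2A6DF) or (0x2A700 <= cp <= 0x2B73F) or
            (0x2B740 <= cp <= 0x2B81F) or (0x2B820 <= cp <= 0x2CEAF) or
            (0xF900 <= cp <= 0xFAFF) or (0x2F800 <= cp <= 0x2FA1F))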
Tokenizes a piece of text into its word pieces.
This uses a greedy longest-match-first algorithm to perform tokenization
using the given vocabulary.
For example:
input = "unaffable"
output = ["un", "##aff", "##able"]
Args:
text: A single token or whitespa... |
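The greedy longest-match-first procedure the docstring describes can be sketched as follows; the vocab-as-a-set interface, the "##" continuation prefix, and the "[UNK]" fallback are this tokenizer's conventions:

def wordpiece_tokenize(token, vocab, unk_token="[UNK]", max_chars=100):
    """ Greedy longest-match-first segmentation of a single token (sketch). """
    if len(token) > max_chars:
        return [unk_token]
    sub_tokens, start = [], 0
    while start < len(token):
        end = len(token)
        cur_substr = None
        while start < end:
            substr = token[start:end]
            if start > 0:
                substr = "##" + substr  # continuation pieces carry the ## prefix
            if substr in vocab:
                cur_substr = substr
                break
            end -= 1
        if cur_substr is None:
            return [unk_token]  # no piece matched; the whole token is unknown
        sub_tokens.append(cur_substr)
        start = end
    return sub_tokens

With vocab = {"un", "##aff", "##able"}, wordpiece_tokenize("unaffable", vocab) yields ["un", "##aff", "##able"], matching the docstring's example.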
Output a list of tuples (story, 1st continuation, 2nd continuation, label)
def load_rocstories_dataset(dataset_path):
    """ Output a list of tuples (story, 1st continuation, 2nd continuation, label) """
with open(dataset_path, encoding='utf_8') as f:
f = csv.reader(f)
output = []
next(f) # ... |
Pre-process datasets containing lists of tuples (story, 1st continuation, 2nd continuation, label)
into Transformer inputs of shape (n_batch, n_alternative, length), comprising for each batch and continuation:
input_ids[batch, alternative, :] = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:c... |
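A sketch of the packing the docstring describes; the names mirror the docstring, while the numpy-based layout and two-alternative shape are assumptions rather than the verbatim row:

import numpy as np

def pre_process_datasets(encoded_datasets, input_len, cap_length,
                         start_token, delimiter_token, clf_token):
    """ Pack (story, cont1, cont2, label) tuples into fixed-size arrays (sketch). """
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for i, (story, cont1, cont2, label) in enumerate(dataset):
            for j, cont in enumerate((cont1, cont2)):
                seq = [start_token] + story[:cap_length] + [delimiter_token] \
                      + cont[:cap_length] + [clf_token]
                input_ids[i, j, :len(seq)] = seq
            mc_labels[i] = label
        tensor_datasets.append((input_ids, mc_labels))
    return tensor_datasets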
Masking some random tokens for the Language Model task with probabilities as in the original BERT paper.
:param tokens: list of str, tokenized sentence.
:param tokenizer: Tokenizer, object used for tokenization (we need its vocab here)
:return: (list of str, list of int), masked tokens and related labels for L... |
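A sketch of the 80/10/10 scheme from the BERT paper that this docstring refers to; the "[MASK]"/"[UNK]" token names and the dict-style tokenizer.vocab are assumptions:

import random

def random_word(tokens, tokenizer):
    """ Sketch: each token is selected with prob. 15%, then replaced by
    [MASK] 80% of the time, a random token 10%, or kept 10%. """
    output_label = []
    for i, token in enumerate(tokens):
        if random.random() < 0.15:
            prob = random.random()
            if prob < 0.8:
                tokens[i] = "[MASK]"
            elif prob < 0.9:
                tokens[i] = random.choice(list(tokenizer.vocab.keys()))
            # else: keep the original token
            # The label is always the original token id, so the LM head
            # must reconstruct it regardless of the corruption applied.
            output_label.append(tokenizer.vocab.get(token, tokenizer.vocab["[UNK]"]))
        else:
            output_label.append(-1)  # -1 marks positions ignored by the loss
    return tokens, output_label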
Convert a raw sample (pair of sentences as tokenized strings) into a proper training sample with
IDs, LM labels, input_mask, CLS and SEP tokens etc.
:param example: InputExample, containing sentence input as strings and is_next label
:param max_seq_length: int, maximum length of sequence.
:param tokeniz... |
Get one sample from corpus consisting of two sentences. With prob. 50% these are two subsequent sentences
from one doc. With 50% the second sentence will be a random one from another doc.
:param index: int, index of sample.
:return: (str, str, int), sentence 1, sentence 2, isNextSentence Label
... |
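A minimal sketch of the 50/50 sampling, assuming the get_corpus_line and get_random_line helpers shown in the following rows:

import random

def random_sent(self, index):
    """ Return (sentence 1, sentence 2, label): 0 for a true next
    sentence, 1 for a random line from another document (sketch). """
    t1, t2 = self.get_corpus_line(index)
    if random.random() > 0.5:
        return t1, t2, 0
    return t1, self.get_random_line(), 1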
Get one sample from corpus consisting of a pair of two subsequent lines from the same doc.
:param item: int, index of sample.
:return: (str, str), two subsequent sentences from corpus
def get_corpus_line(self, item):
"""
Get one sample from corpus consisting of a pair of two subsequent ... |
Get random line from another document for nextSentence task.
:return: str, content of one line
def get_random_line(self):
"""
Get random line from another document for nextSentence task.
:return: str, content of one line
"""
# Similar to original tf repo: This outer loop... |
Gets next line of random_file and starts over when reaching end of file
def get_next_line(self):
""" Gets next line of random_file and starts over when reaching end of file"""
try:
line = next(self.random_file).strip()
        # keep track of which document we are currently looking at to... |
Creates the predictions for the masked LM objective. This is mostly copied from the Google BERT repo, but
with several refactors to clean it up and remove a lot of unnecessary variables.
def create_masked_lm_predictions(tokens, masked_lm_prob, max_predictions_per_seq, vocab_list):
"""Creates the predictions fo... |
This code is mostly a duplicate of the equivalent function from Google BERT's repo.
However, we make some changes and improvements. Sampling is improved and no longer requires a loop in this function.
Also, documents are sampled proportionally to the number of sentences they contain, which means each sentence
... |
embedding: an nn.Embedding layer
bias: [n_vocab]
labels: [b1, b2]
inputs: [b1, b2, n_emb]
sampler: you may use a LogUniformSampler
Return
logits: [b1, b2, 1 + n_sample]
def sample_logits(embedding, bias, labels, inputs, sampler):
"""
embedding: an nn.Embedding la... |
Params:
hidden :: [len*bsz x d_proj]
target :: [len*bsz]
Return:
        if target is not None:
            out :: [len*bsz] Negative log likelihood
        else:
            out :: [len*bsz x n_tokens] log probabilities of tokens over the vocabula... |
r""" Computes log probabilities for all :math:`n\_classes`
From: https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/adaptive.py
Args:
hidden (Tensor): a minibatch of examples
Returns:
    log-probabilities for each class :math:`c`
in range :math:`0... |
labels: [b1, b2]
Return
true_log_probs: [b1, b2]
samp_log_probs: [n_sample]
neg_samples: [n_sample]
def sample(self, labels):
"""
labels: [b1, b2]
Return
true_log_probs: [b1, b2]
samp_log_probs: [n_sample]
neg_s... |
A map of modules from TF to PyTorch.
This time I use a map to keep the PyTorch model as identical to the original PyTorch model as possible.
def build_tf_to_pytorch_map(model, config):
""" A map of modules from TF to PyTorch.
This time I use a map to keep the PyTorch model as identical to the origi... |
Load tf checkpoints in a pytorch model
def load_tf_weights_in_transfo_xl(model, config, tf_path):
""" Load tf checkpoints in a pytorch model
"""
try:
import numpy as np
import tensorflow as tf
except ImportError:
print("Loading a TensorFlow models in PyTorch, requires TensorFlow... |
Initialize the weights.
def init_weights(self, m):
""" Initialize the weights.
"""
classname = m.__class__.__name__
if classname.find('Linear') != -1:
if hasattr(m, 'weight') and m.weight is not None:
self.init_weight(m.weight)
if hasattr(m, 'bias... |
Instantiate a TransfoXLPreTrainedModel from a pre-trained model file or a pytorch state dict.
Download and cache the pre-trained model file if needed.
Params:
pretrained_model_name_or_path: either:
- a str with the name of a pre-trained model to load selected in the list of:... |
Params:
input_ids :: [bsz, len]
    mems :: optional mems from previous forward passes (or init_mems)
list (num layers) of mem states at the entry of each layer
shape :: [self.config.mem_len, bsz, self.config.d_model]
Note that t... |
Run this to be sure output and input (adaptive) softmax weights are tied
def tie_weights(self):
""" Run this to be sure output and input (adaptive) softmax weights are tied """
# sampled softmax
if self.sample_softmax > 0:
if self.config.tie_weight:
self.out_layer.we... |
Params:
input_ids :: [bsz, len]
target :: [bsz, len]
Returns:
tuple(softmax_output, new_mems) where:
new_mems: list (num layers) of hidden states at the entry of each layer
shape :: [mem_len, bsz, self.config.d_model... |
Return DateOffset object from string or tuple representation
or datetime.timedelta object
Parameters
----------
freq : str, tuple, datetime.timedelta, DateOffset or None
Returns
-------
DateOffset
None if freq is None.
Raises
------
ValueError
If freq is an inv... |
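As a hedged illustration: to_offset('5min') returns a Minute(5) offset and to_offset(datetime.timedelta(days=1)) a Day offset, while an unrecognized alias raises ValueError (exact reprs vary across pandas versions).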
Return DateOffset object associated with rule name
Examples
--------
get_offset('EOM') --> BMonthEnd(1)
def get_offset(name):
"""
Return DateOffset object associated with rule name
Examples
--------
get_offset('EOM') --> BMonthEnd(1)
"""
if name not in libfreqs._dont_uppercase... |
Infer the most likely frequency given the input index. If the frequency is
uncertain, a warning will be printed.
Parameters
----------
index : DatetimeIndex or TimedeltaIndex
    If passed a Series, will use the values of the Series (NOT THE INDEX)
warn : boolean, default True
Returns
---... |
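A quick illustration, assuming a regular daily index: pd.infer_freq(pd.date_range('2020-01-01', periods=5, freq='D')) returns 'D'.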
Find the appropriate frequency string to describe the inferred
frequency of self.values
Returns
-------
str or None
def get_freq(self):
"""
Find the appropriate frequency string to describe the inferred
frequency of self.values
Returns
-------
... |
load a pickle, with a provided encoding
if compat is True:
fake the old class hierarchy
if it works, then return the new type objects
Parameters
----------
fh : a filelike object
encoding : an optional encoding
is_verbose : show exception output
def load(fh, encoding=None, is_ve... |
This is called upon unpickling, rather than the default which doesn't
have arguments and breaks __new__.
def _new_Index(cls, d):
"""
This is called upon unpickling, rather than the default which doesn't
have arguments and breaks __new__.
"""
# required for backward compat, because PI can't be i... |
Construct an index from sequences of data.
A single sequence returns an Index. Multiple sequences return a
MultiIndex.
Parameters
----------
sequences : sequence of sequences
names : sequence of str
Returns
-------
index : Index or MultiIndex
Examples
--------
>>> ensure... |
Ensure that we have an index from some index-like object.
Parameters
----------
index : sequence
An Index or other sequence
copy : bool
Returns
-------
index : Index or MultiIndex
Examples
--------
>>> ensure_index(['a', 'b'])
Index(['a', 'b'], dtype='object')
... |
Trims leading whitespace shared by all strings.
def _trim_front(strings):
    """
    Trims leading whitespace shared by all strings.
    """
    trimmed = strings
    # Peel one leading space at a time, but only while every string
    # still starts with one; startswith is safe on empty strings.
    while len(trimmed) > 0 and all(x.startswith(' ') for x in trimmed):
        trimmed = [x[1:] for x in trimmed]
    return trimmed |
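A quick check of the behavior: _trim_front(['  1.0', '  2.5']) gives ['1.0', '2.5'], while _trim_front([' a', 'b']) returns the input unchanged because not every string starts with a space.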
We require that we have a dtype compat for the values. If we are passed
a non-dtype compat, then coerce using the constructor.
Must be careful not to recurse.
def _simple_new(cls, values, name=None, dtype=None, **kwargs):
"""
We require that we have a dtype compat for the values. If we... |
Create a new Index, inferring the class from the passed value; don't copy
the data. Use the same object attributes, with passed-in attributes
taking precedence.
*this is an internal non-public method*
Parameters
----------
values : the values to create the new Index, optional... |
More flexible, faster check like ``is`` but that works through views.
Note: this is *not* the same as ``Index.identical()``, which checks
that metadata is also the same.
Parameters
----------
other : object
other object to compare against.
Returns
-... |
Internal method to handle NA filling of take.
def _assert_take_fillable(self, values, indices, allow_fill=True,
fill_value=None, na_value=np.nan):
"""
Internal method to handle NA filling of take.
"""
indices = ensure_platform_int(indices)
# only f... |
Return the formatted data as a unicode string.
def _format_data(self, name=None):
"""
Return the formatted data as a unicode string.
"""
# do we want to justify (only do so for non-objects)
is_justify = not (self.inferred_type in ('string', 'unicode') or
... |
Render a string representation of the Index.
def format(self, name=False, formatter=None, **kwargs):
"""
Render a string representation of the Index.
"""
header = []
if name:
header.append(pprint_thing(self.name,
escape_chars=('... |
Format specified values of `self` and return them.
Parameters
----------
slicer : int, array-like
An indexer into `self` that specifies which values
are used in the formatting process.
kwargs : dict
Options for specifying how the values should be form... |
Actually format specific types of the index.
def _format_native_types(self, na_rep='', quoting=None, **kwargs):
"""
Actually format specific types of the index.
"""
mask = isna(self)
if not self.is_object() and not quoting:
values = np.asarray(self).astype(str)
... |
Return a summarized representation.
Parameters
----------
name : str
name to use in the summary representation
Returns
-------
String with a summarized representation of the index
def _summary(self, name=None):
"""
Return a summarized repres... |
Return a summarized representation.
.. deprecated:: 0.23.0
def summary(self, name=None):
"""
Return a summarized representation.
.. deprecated:: 0.23.0
"""
warnings.warn("'summary' is deprecated and will be removed in a "
"future version.", Future... |
Create a Series with both index and values equal to the index keys,
useful with map for returning an indexer based on an index.
Parameters
----------
index : Index, optional
index of resulting Series. If None, defaults to original index
name : string, optional
... |
Create a DataFrame with a column containing the Index.
.. versionadded:: 0.24.0
Parameters
----------
index : boolean, default True
Set the index of the returned DataFrame as the original Index.
name : object, default None
The passed name should substit... |
Handles the quirks of having a singular 'name' parameter for general
Index and plural 'names' parameter for MultiIndex.
def _validate_names(self, name=None, names=None, deep=False):
"""
Handles the quirks of having a singular 'name' parameter for general
Index and plural 'names' paramet... |
Set new names on index. Each name has to be a hashable type.
Parameters
----------
values : str or sequence
name(s) to set
level : int, level name, or sequence of int/level names (default None)
If the index is a MultiIndex (hierarchical), level(s) to set (None
... |
Set Index or MultiIndex name.
Able to set new names partially and by level.
Parameters
----------
names : label or list of label
Name(s) to set.
level : int, label or list of int or label, optional
If the index is a MultiIndex, level(s) to set (None for ... |
Alter Index or MultiIndex name.
Able to set new names without level. Defaults to returning new index.
Length of names must match number of levels in MultiIndex.
Parameters
----------
name : label or list of labels
Name(s) to set.
inplace : boolean, default F... |
Validate index level.
For single-level Index getting level number is a no-op, but some
verification must be done like in MultiIndex.
def _validate_index_level(self, level):
"""
Validate index level.
For single-level Index getting level number is a no-op, but some
verif... |
For internal compatibility with the Index API.
Sort the Index. This is for compat with MultiIndex
Parameters
----------
ascending : boolean, default True
False to sort in descending order
level, sort_remaining are compat parameters
Returns
---... |
Return index with requested level(s) removed.
If resulting index has only 1 level left, the result will be
of Index type, not MultiIndex.
.. versionadded:: 0.23.1 (support for non-MultiIndex)
Parameters
----------
level : int, str, or list-like, default 0
I... |
Return if each value is NaN.
def _isnan(self):
"""
Return if each value is NaN.
"""
if self._can_hold_na:
return isna(self)
else:
        # shouldn't reach this condition; hasnans should have been checked beforehand
values = np.empty(len(self), dtype=np.bool_)
... |
Extract duplicated index elements.
.. deprecated:: 0.23.0
Use idx[idx.duplicated()].unique() instead
Returns a sorted list of index elements which appear more than once in
the index.
Returns
-------
array-like
List of duplicated indexes.
... |
Returns an index containing unique values.
Parameters
----------
dropna : bool
If True, NaN values are dropped.
Returns
-------
uniques : index
def _get_unique_index(self, dropna=False):
"""
Returns an index containing unique values.
... |
If the result of a set operation will be self,
return self, unless the name changes, in which
case make a shallow copy of self.
def _get_reconciled_name_object(self, other):
"""
If the result of a set operation will be self,
return self, unless the name changes, in which
... |
Form the union of two Index objects.
Parameters
----------
other : Index or array-like
sort : bool or None, default None
Whether to sort the resulting Index.
* None : Sort the result, except when
1. `self` and `other` are equal.
2. `... |
Form the intersection of two Index objects.
This returns a new Index with elements common to the index and `other`.
Parameters
----------
other : Index or array-like
sort : False or None, default False
Whether to sort the resulting index.
* False : do n... |
Return a new Index with elements from the index that are not in
`other`.
This is the set difference of two Index objects.
Parameters
----------
other : Index or array-like
sort : False or None, default None
Whether to sort the resulting index. By default, th... |
Compute the symmetric difference of two Index objects.
Parameters
----------
other : Index or array-like
result_name : str
sort : False or None, default None
Whether to sort the resulting index. By default, the
values are attempted to be sorted, but any T... |
Fallback pad/backfill get_indexer that works for monotonic decreasing
indexes and non-monotonic targets.
def _get_fill_indexer_searchsorted(self, target, method, limit=None):
"""
Fallback pad/backfill get_indexer that works for monotonic decreasing
indexes and non-monotonic targets.
... |
Get the indexer for the nearest index labels; requires an index with
values that can be subtracted from each other (e.g., not strings or
tuples).
def _get_nearest_indexer(self, target, limit, tolerance):
"""
Get the indexer for the nearest index labels; requires an index with
va... |