text |
|---|
r"""Standard 2-layer LSTM language model with tied embedding and output weights.
Both embedding and hidden dimensions are 200.
Parameters
----------
dataset_name : str or None, default None
The dataset name on which the pre-trained model is trained.
Options are 'wikitext-2'. If specifi... |
r"""Big 1-layer LSTMP language model.
Both embedding and projection size are 512. Hidden size is 2048.
Parameters
----------
dataset_name : str or None, default None
The dataset name on which the pre-trained model is trained.
Options are 'gbw'. If specified, then the returned vocabular... |
Implement forward computation.
Parameters
-----------
inputs : NDArray
input tensor with shape `(sequence_length, batch_size)`
when `layout` is "TNC".
begin_state : list
initial recurrent state tensors with length equal to num_layers.
the ... |
Implement forward computation.
Parameters
-----------
inputs : NDArray
input tensor with shape `(sequence_length, batch_size)`
when `layout` is "TNC".
begin_state : list
initial recurrent state tensors with length equal to num_layers*2.
Fo... |
Get the object type of the cell by parsing the input
Parameters
----------
cell_type : str or type
Returns
-------
cell_constructor: type
The constructor of the RNNCell
def _get_cell_type(cell_type):
"""Get the object type of the cell by parsing the input
Parameters
-----... |
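As a rough illustration of the lookup described above, here is a minimal sketch of resolving a cell type from either a string or a class; the mapping keys are hypothetical and may not match the exact names the library accepts.

```python
from mxnet.gluon import rnn

# Hypothetical mapping from string names to Gluon RNN cell constructors.
_CELL_TYPES = {'lstm': rnn.LSTMCell, 'gru': rnn.GRUCell, 'rnn_tanh': rnn.RNNCell}

def get_cell_type(cell_type):
    """Return the cell constructor for a string name, or pass a type through."""
    if isinstance(cell_type, str):
        return _CELL_TYPES[cell_type.lower()]
    return cell_type

assert get_cell_type('lstm') is rnn.LSTMCell
```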
Compute the context with respect to a center word in a sentence.
Takes a numpy array of sentence boundaries.
def _get_context(center_idx, sentence_boundaries, window_size,
random_window_size, seed):
"""Compute the context with respect to a center word in a sentence.
Takes a numpy arra... |
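A minimal sketch of such a context lookup, assuming `sentence_boundaries` holds cumulative sentence end indices; the helper name and the exact window sampling are illustrative, not the library's implementation.

```python
import random

def get_context(center_idx, sentence_boundaries, window_size,
                random_window_size=True, seed=None):
    """Sketch: indices of context words around center_idx, clipped to its sentence."""
    if seed is not None:
        random.seed(seed)
    # find the sentence [start, end) that contains center_idx
    start = 0
    for end in sentence_boundaries:
        if center_idx < end:
            break
        start = end
    window = random.randint(1, window_size) if random_window_size else window_size
    left = range(max(start, center_idx - window), center_idx)
    right = range(center_idx + 1, min(end, center_idx + window + 1))
    return list(left) + list(right)
```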
Construct the model.
def model(dropout, vocab, model_mode, output_size):
"""Construct the model."""
textCNN = SentimentNet(dropout=dropout, vocab_size=len(vocab), model_mode=model_mode,
                           output_size=output_size)
textCNN.hybridize()
return textCNN |
Initialize parameters.
def init(textCNN, vocab, model_mode, context, lr):
"""Initialize parameters."""
textCNN.initialize(mx.init.Xavier(), ctx=context, force_reinit=True)
if model_mode != 'rand':
textCNN.embedding.weight.set_data(vocab.embedding.idx_to_vec)
if model_mode == 'multichannel':
... |
Use multiprocessing to apply the transform to the dataset.
Parameters
----------
dataset: dataset-like object
Source dataset.
transform: callable
Transformer function.
num_workers: int, default 8
The number of multiprocessing workers to use for data preprocessing.
def preprocess... |
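A bare-bones sketch of the same idea with a standard process pool; the real helper may batch work and return a `SimpleDataset`, which this sketch does not.

```python
import multiprocessing

def preprocess_dataset(dataset, transform, num_workers=8):
    """Sketch: apply `transform` to every sample with a process pool."""
    with multiprocessing.Pool(num_workers) as pool:
        return pool.map(transform, dataset)
```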
Returns a pre-defined model by name.
Parameters
----------
name : str
Name of the model.
dataset_name : str or None, default 'wikitext-2'.
The dataset name on which the pre-trained model is trained.
For language model, options are 'wikitext-2'.
For ELMo, options are 'gbw... |
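For example, the pre-trained language models described earlier can be fetched through this API; the call below is a sketch that assumes GluonNLP is installed and the weights can be downloaded.

```python
import gluonnlp as nlp

# Sketch: fetch the 2-layer LSTM LM described above with WikiText-2 weights
# (downloads pre-trained parameters on first use).
model, vocab = nlp.model.get_model('standard_lstm_lm_200',
                                   dataset_name='wikitext-2',
                                   pretrained=True)
print(model)
```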
Ignore the masked elements when calculating the softmax
Parameters
----------
F : symbol or ndarray
att_score : Symbol or NDArray
Shape (batch_size, query_length, memory_length)
mask : Symbol or NDArray or None
Shape (batch_size, query_length, memory_length)
Returns
-------... |
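A compact sketch of such a masked softmax: positions where the mask is zero are pushed to a large negative score before normalizing. The helper name and the -1e18 constant are illustrative choices.

```python
import mxnet as mx

def masked_softmax(F, att_score, mask=None):
    """Sketch: masked positions get a large negative score, hence ~0 probability."""
    if mask is not None:
        att_score = F.where(mask, att_score, -1e18 * F.ones_like(att_score))
    return F.softmax(att_score, axis=-1)

# usage with the ndarray frontend
scores = mx.nd.random.uniform(shape=(2, 3, 4))
mask = mx.nd.ones((2, 3, 4))
probs = masked_softmax(mx.nd, scores, mask)
```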
Read from the value matrix given the attention weights.
Parameters
----------
F : symbol or ndarray
att_weights : Symbol or NDArray
Attention weights.
For single-head attention,
Shape (batch_size, query_length, memory_length).
For mult... |
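For the single-head case, reading the value matrix reduces to a batched matrix product; a sketch with made-up shapes:

```python
import mxnet as mx

att_weights = mx.nd.softmax(mx.nd.random.uniform(shape=(2, 5, 7)), axis=-1)
value = mx.nd.random.uniform(shape=(2, 7, 16))   # (batch, memory_length, dim)
context = mx.nd.batch_dot(att_weights, value)    # (batch, query_length, dim)
```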
Get the translation result given the input sentence.
Parameters
----------
src_seq : mx.nd.NDArray
Shape (batch_size, length)
src_valid_length : mx.nd.NDArray
Shape (batch_size,)
Returns
-------
samples : NDArray
Samples draw ... |
Evaluate parser on a data set
Parameters
----------
parser : BiaffineParser
biaffine parser
vocab : ParserVocabulary
vocabulary built from data set
num_buckets_test : int
size of buckets (cluster sentences into this number of clusters)
test_batch_size : int
batch... |
Create parameter with its value initialized according to a numpy tensor
Parameters
----------
name : str
parameter name
array : np.ndarray
initialization value
Returns
-------
mxnet.gluon.parameter
a parameter object
def paramete... |
Create parameter given name, shape and initializer
Parameters
----------
name : str
parameter name
shape : tuple
parameter shape
init : mxnet.initializer
an initializer
Returns
-------
mxnet.gluon.parameter
a... |
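A hedged sketch of both helpers using `mxnet.gluon.Parameter`; the function names mirror the docstrings above but the bodies are illustrative.

```python
import mxnet as mx
from mxnet import gluon
import numpy as np

def parameter_from_numpy(name, array):
    """Sketch: build a gluon Parameter and fill it with a numpy tensor."""
    param = gluon.Parameter(name, shape=array.shape)
    param.initialize()
    param.set_data(mx.nd.array(array))
    return param

def parameter_init(name, shape, init):
    """Sketch: build a gluon Parameter with a given shape and initializer."""
    param = gluon.Parameter(name, shape=shape, init=init)
    param.initialize()
    return param

p = parameter_from_numpy('embed_weight', np.random.randn(10, 4))
q = parameter_init('proj_weight', (4, 4), mx.init.Xavier())
```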
Run decoding
Parameters
----------
word_inputs : mxnet.ndarray.NDArray
word indices of seq_len x batch_size
tag_inputs : mxnet.ndarray.NDArray
tag indices of seq_len x batch_size
arc_targets : mxnet.ndarray.NDArray
gold arc indices of seq_len ... |
Save model
Parameters
----------
filename : str
path to model file
def save_parameters(self, filename):
"""Save model
Parameters
----------
filename : str
path to model file
"""
params = self._collect_params_with_prefix()... |
Function for processing data in worker process.
def _worker_fn(samples, batchify_fn, dataset=None):
"""Function for processing data in worker process."""
# pylint: disable=unused-argument
# each worker process is required to fork a new MXIndexedRecordIO handle
# preserving dataset as global... |
Threadpool worker function for processing data.
def _thread_worker_fn(samples, batchify_fn, dataset):
"""Threadpool worker function for processing data."""
if isinstance(samples[0], (list, tuple)):
batch = [batchify_fn([dataset[i] for i in shard]) for shard in samples]
else:
batch = batchif... |
Creates an instance of token embedding.
Creates a token embedding instance by loading embedding vectors from an externally hosted
pre-trained token embedding file, such as those of GloVe and FastText. To get all the valid
`embedding_name` and `source`, use :func:`gluonnlp.embedding.list_sources`.
Pa... |
Get valid token embedding names and their pre-trained file names.
To load token embedding vectors from an externally hosted pre-trained token embedding file,
such as those of GloVe and FastText, one should use
`gluonnlp.embedding.create(embedding_name, source)`. This method returns all the
valid names... |
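A short sketch combining the two APIs above; the GloVe source name is one of the registered sources and triggers a download on first use.

```python
import gluonnlp as nlp

# List a few registered GloVe sources, then create one of them.
print(nlp.embedding.list_sources('glove')[:3])
glove = nlp.embedding.create('glove', source='glove.6B.50d')
print(glove['hello'].shape)
```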
Load embedding vectors from a pre-trained token embedding file.
Both text files and TokenEmbedding serialization files are supported.
elem_delim and encoding are ignored for non-text files.
For every unknown token, if its representation `self.unknown_token` is encountered in the
pre-tr... |
Load embedding vectors from a pre-trained token embedding file.
For every unknown token, if its representation `self.unknown_token` is encountered in the
pre-trained token embedding file, index 0 of `self.idx_to_vec` maps to the pre-trained token
embedding vector loaded from the file; otherwise... |
Load embedding vectors from a pre-trained token embedding file.
For every unknown token, if its representation `self.unknown_token` is encountered in the
pre-trained token embedding file, index 0 of `self.idx_to_vec` maps to the pre-trained token
embedding vector loaded from the file; otherwise... |
Check that tokens and embedding are in the format for __setitem__.
def _check_vector_update(self, tokens, new_embedding):
"""Check that tokens and embedding are in the format for __setitem__."""
assert self._idx_to_vec is not None, '`idx_to_vec` has not been initialized.'
if not isinstance(tok... |
Checks if a pre-trained token embedding source name is valid.
Parameters
----------
source : str
The pre-trained token embedding source.
def _check_source(cls, source_file_hash, source):
"""Checks if a pre-trained token embedding source name is valid.
Parameters
... |
Creates a user-defined token embedding from a pre-trained embedding file.
This is to load embedding vectors from a user-defined pre-trained token embedding file.
For example, if `elem_delim` = ' ', the expected format of a custom pre-trained token
embedding file may look like:
'hello ... |
Serializes the TokenEmbedding to a file specified by file_path.
TokenEmbedding is serialized by converting the list of tokens, the
array of word embeddings and other metadata to numpy arrays, saving all
in a single (optionally compressed) Zipfile. See
https://docs.scipy.org/doc/numpy-1.... |
Create a new TokenEmbedding from a serialized one.
TokenEmbedding is serialized by converting the list of tokens, the
array of word embeddings and other metadata to numpy arrays, saving all
in a single (optionally compressed) Zipfile. See
https://docs.scipy.org/doc/numpy-1.14.2/neps/npy... |
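A sketch of the serialize/deserialize round trip, reusing the `glove` embedding from the earlier sketch; the file name is arbitrary.

```python
# Round-trip a TokenEmbedding through its npz serialization.
glove.serialize('glove.6B.50d.npz')
restored = nlp.embedding.TokenEmbedding.deserialize('glove.6B.50d.npz')
assert restored.idx_to_token[:5] == glove.idx_to_token[:5]
```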
Evaluate the model on a mini-batch.
def evaluate(data_source):
"""Evaluate the model on a mini-batch.
"""
log.info('Start predict')
tic = time.time()
for batch in data_source:
inputs, token_types, valid_length = batch
out = net(inputs.astype('float32').as_in_context(ctx),
... |
Registers a dataset with segment specific hyperparameters.
When passing keyword arguments to `register`, they are checked to be valid
keyword arguments for the registered Dataset class constructor and are
saved in the registry. Registered keyword arguments can be retrieved with
the `list_datasets` func... |
Creates an instance of a registered dataset.
Parameters
----------
name : str
The dataset name (case-insensitive).
Returns
-------
An instance of :class:`mxnet.gluon.data.Dataset` constructed with the
keyword arguments passed to the create function.
def create(name, **kwargs):
... |
Get valid datasets and registered parameters.
Parameters
----------
name : str or None, default None
Return names and registered parameters of registered datasets. If name
is specified, only registered parameters of the respective dataset are
returned.
Returns
-------
d... |
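A sketch of the register/create/list_datasets round trip; `MyToyDataset` and its `segment` values are invented for illustration.

```python
import gluonnlp as nlp
from mxnet.gluon.data import SimpleDataset

@nlp.data.register(segment=['train', 'test'])
class MyToyDataset(SimpleDataset):
    def __init__(self, segment='train'):
        super(MyToyDataset, self).__init__(['sample from ' + segment])

ds = nlp.data.create('MyToyDataset', segment='test')
print(ds[0])
print(nlp.data.list_datasets('MyToyDataset'))
```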
Parse command line arguments.
def parse_args():
"""Parse command line arguments."""
parser = argparse.ArgumentParser(
description='Vocabulary extractor.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--max-size', type=int, default=None)
parser.add_argumen... |
Compute the vocabulary.
def get_vocab(args):
"""Compute the vocabulary."""
counter = nlp.data.Counter()
start = time.time()
for filename in args.files:
print('Starting processing of {} after {:.1f} seconds.'.format(
filename,
time.time() - start))
with open(filen... |
Generate the unnormalized score for the given input sequences.
Parameters
----------
inputs : NDArray, shape (batch_size, seq_length)
Input words for the sequences.
token_types : NDArray, shape (batch_size, seq_length)
Token types for the sequences, used to i... |
Add evaluation specific parameters to parser.
def add_parameters(parser):
"""Add evaluation specific parameters to parser."""
group = parser.add_argument_group('Evaluation arguments')
group.add_argument('--eval-batch-size', type=int, default=1024)
# Datasets
group.add_argument(
'--similar... |
Validate provided arguments and act on --help.
def validate_args(args):
"""Validate provided arguments and act on --help."""
# Check correctness of similarity dataset names
for dataset_name in args.similarity_datasets:
if dataset_name.lower() not in map(
str.lower,
n... |
Generator over all similarity evaluation datasets.
Iterates over dataset names, keyword arguments for their creation and the
created dataset.
def iterate_similarity_datasets(args):
"""Generator over all similarity evaluation datasets.
Iterates over dataset names, keyword arguments for their creation ... |
Generator over all analogy evaluation datasets.
Iterates over dataset names, keyword arguments for their creation and the
created dataset.
def iterate_analogy_datasets(args):
"""Generator over all analogy evaluation datasets.
Iterates over dataset names, keyword arguments for their creation and the
... |
Returns a set of all tokens occurring in the evaluation datasets.
def get_similarity_task_tokens(args):
"""Returns a set of all tokens occurring in the evaluation datasets."""
tokens = set()
for _, _, dataset in iterate_similarity_datasets(args):
tokens.update(
itertools.chain.from_iterable((... |
Returns a set of all tokens occurring in the evaluation datasets.
def get_analogy_task_tokens(args):
"""Returns a set of all tokens occurring in the evaluation datasets."""
tokens = set()
for _, _, dataset in iterate_analogy_datasets(args):
tokens.update(
itertools.chain.from_iterable(
... |
Evaluate on specified similarity datasets.
def evaluate_similarity(args, token_embedding, ctx, logfile=None,
global_step=0):
"""Evaluate on specified similarity datasets."""
results = []
for similarity_function in args.similarity_functions:
evaluator = nlp.embedding.evaluat... |
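One plausible evaluation step with the cosine-similarity evaluator mentioned above; `glove` is assumed to be a loaded TokenEmbedding as in the earlier sketch, and the word indices below are placeholders for real dataset pairs.

```python
import mxnet as mx
import gluonnlp as nlp

evaluator = nlp.embedding.evaluation.WordEmbeddingSimilarity(
    idx_to_vec=glove.idx_to_vec, similarity_function='CosineSimilarity')
evaluator.initialize()
words1 = mx.nd.array([10, 11])    # placeholder indices into glove.idx_to_vec
words2 = mx.nd.array([20, 21])
print(evaluator(words1, words2))  # one similarity score per pair
```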
Evaluate on specified analogy datasets.
The analogy task is an open-vocabulary task; make sure to pass a
token_embedding with a sufficiently large number of supported tokens.
def evaluate_analogy(args, token_embedding, ctx, logfile=None, global_step=0):
"""Evaluate on specified analogy datasets.
The ... |
Log a similarity evaluation result dictionary as TSV to logfile.
def log_similarity_result(logfile, result):
"""Log a similarity evaluation result dictionary as TSV to logfile."""
assert result['task'] == 'similarity'
if not logfile:
return
with open(logfile, 'a') as f:
f.write('\t'.j... |
Get model for pre-training.
def get_model_loss(ctx, model, pretrained, dataset_name, dtype, ckpt_dir=None, start_step=None):
"""Get model for pre-training."""
# model
model, vocabulary = nlp.model.get_model(model,
dataset_name=dataset_name,
... |
create dataset for pretraining.
def get_pretrain_dataset(data, batch_size, num_ctxes, shuffle, use_avg_len,
num_buckets, num_parts=1, part_idx=0, prefetch=True):
"""create dataset for pretraining."""
num_files = len(glob.glob(os.path.expanduser(data)))
logging.debug('%d files found... |
Return a dummy data loader which returns a fixed data batch of target shape
def get_dummy_dataloader(dataloader, target_shape):
"""Return a dummy data loader which returns a fixed data batch of target shape"""
data_iter = enumerate(dataloader)
_, data_batch = next(data_iter)
logging.debug('Searching ta... |
Save the model parameters, marked by step_num.
def save_params(step_num, model, trainer, ckpt_dir):
"""Save the model parameters, marked by step_num."""
param_path = os.path.join(ckpt_dir, '%07d.params'%step_num)
trainer_path = os.path.join(ckpt_dir, '%07d.states'%step_num)
logging.info('[step %d] Saving... |
Log training progress.
def log(begin_time, running_num_tks, running_mlm_loss, running_nsp_loss, step_num,
mlm_metric, nsp_metric, trainer, log_interval):
"""Log training progress."""
end_time = time.time()
duration = end_time - begin_time
throughput = running_num_tks / duration / 1000.0
run... |
split and load arrays to a list of contexts
def split_and_load(arrs, ctx):
"""split and load arrays to a list of contexts"""
assert isinstance(arrs, (list, tuple))
# split and load
loaded_arrs = [mx.gluon.utils.split_and_load(arr, ctx, even_split=False) for arr in arrs]
return zip(*loaded_arrs) |
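An illustrative use of the helper above, spreading one batch across two contexts (CPU contexts are used only for demonstration):

```python
import mxnet as mx

ctxs = [mx.cpu(0), mx.cpu(1)]
data = mx.nd.random.uniform(shape=(8, 10))
label = mx.nd.random.uniform(shape=(8,))
for data_shard, label_shard in split_and_load([data, label], ctxs):
    # each iteration yields the shards that live on one context
    print(data_shard.context, data_shard.shape, label_shard.shape)
```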
forward computation for evaluation
def forward(data, model, mlm_loss, nsp_loss, vocab_size, dtype):
"""forward computation for evaluation"""
(input_id, masked_id, masked_position, masked_weight, \
next_sentence_label, segment_id, valid_length) = data
num_masks = masked_weight.sum() + 1e-8
valid_le... |
Evaluation function.
def evaluate(data_eval, model, nsp_loss, mlm_loss, vocab_size, ctx, log_interval, dtype):
"""Evaluation function."""
mlm_metric = MaskedAccuracy()
nsp_metric = MaskedAccuracy()
mlm_metric.reset()
nsp_metric.reset()
eval_begin_time = time.time()
begin_time = time.time()... |
Argument parser
def get_argparser():
"""Argument parser"""
parser = argparse.ArgumentParser(description='BERT pretraining example.')
parser.add_argument('--num_steps', type=int, default=20, help='Number of optimization steps')
parser.add_argument('--num_buckets', type=int, default=1,
... |
Cache the processed dataset into an npz file.
Parameters
----------
dataset : SimpleDataset
prefix : str
def _cache_dataset(dataset, prefix):
"""Cache the processed dataset into an npz file.
Parameters
----------
dataset : SimpleDataset
prefix : str
"""... |
Load translation dataset
Parameters
----------
dataset : str
args : argparse result
Returns
-------
def load_translation_data(dataset, bleu, args):
"""Load translation dataset
Parameters
----------
dataset : str
args : argparse result
Returns
-------
"""
... |
Create data loaders for training/validation/test.
def make_dataloader(data_train, data_val, data_test, args,
use_average_length=False, num_shards=0, num_workers=8):
"""Create data loaders for training/validation/test."""
data_train_lengths = get_data_lengths(data_train)
data_val_lengths... |
Method representing the process’s activity.
def run(self):
"""Method representing the process’s activity."""
random.seed(self.seed)
np.random.seed(self.np_seed)
if not isinstance(self, multiprocessing.Process):
# Calling mxnet methods in a subprocess will raise an exception ... |
Defines the forward computation. Arguments can be either
:py:class:`NDArray` or :py:class:`Symbol`.
def forward(self, inputs, states=None): # pylint: disable=arguments-differ
"""Defines the forward computation. Arguments can be either
:py:class:`NDArray` or :py:class:`Symbol`."""
batch_... |
Compute embedding of words in batch.
Parameters
----------
words : mx.nd.NDArray
Array of token indices.
def hybrid_forward(self, F, words, weight):
"""Compute embedding of words in batch.
Parameters
----------
words : mx.nd.NDArray
Arra... |
Create an instance of the class and load weights.
Load the weights from the fastText binary format created by
https://github.com/facebookresearch/fastText
Parameters
----------
path : str
Path to the .bin model file.
ctx : mx.Context, default mx.cpu()
... |
Config the logging.
def logging_config(logpath=None,
level=logging.DEBUG,
console_level=logging.INFO,
no_console=False):
"""
Config the logging.
"""
logger = logging.getLogger('nli')
# Remove all the current handlers
for handler in logger... |
Parse command line arguments.
def parse_args():
"""Parse command line arguments."""
parser = argparse.ArgumentParser(
description='GloVe with GluonNLP',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# Data options
group = parser.add_argument_group('Data arguments')
group.... |
Helper function to get training data.
def get_train_data(args):
"""Helper function to get training data."""
counter = dict()
with io.open(args.vocab, 'r', encoding='utf-8') as f:
for line in f:
token, count = line.split('\t')
counter[token] = int(count)
vocab = nlp.Vocab... |
Training helper.
def train(args):
"""Training helper."""
vocab, row, col, counts = get_train_data(args)
model = GloVe(token_to_idx=vocab.token_to_idx, output_dim=args.emsize,
dropout=args.dropout, x_max=args.x_max, alpha=args.alpha,
weight_initializer=mx.init.Uniform(sca... |
Log to a file.
def log(args, kwargs):
"""Log to a file."""
logfile = os.path.join(args.logdir, 'log.tsv')
if 'log_created' not in globals():
if os.path.exists(logfile):
logging.error('Logfile %s already exists.', logfile)
sys.exit(1)
global log_created
log... |
Compute embedding of words in batch.
Parameters
----------
row : mxnet.nd.NDArray or mxnet.sym.Symbol
Array of token indices for source words. Shape (batch_size, ).
col : mxnet.nd.NDArray or mxnet.sym.Symbol
Array of token indices for context words. Shape (batch_... |
Updates the internal evaluation result.
Parameters
----------
labels : list of `NDArray`
The labels of the data with class indices as values, one per sample.
preds : list of `NDArray`
Prediction values for samples. Each prediction value can either be the class in... |
Predict the relation of two sentences.
Parameters
----------
sentence1 : NDArray
Shape (batch_size, length)
sentence2 : NDArray
Shape (batch_size, length)
Returns
-------
pred : NDArray
Shape (batch_size, num_classes). num_cla... |
Compute intra-sentence attention given embedded words.
Parameters
----------
feature_a : NDArray
Shape (batch_size, length, hidden_size)
Returns
-------
alpha : NDArray
Shape (batch_size, length, hidden_size)
def hybrid_forward(self, F, feature_... |
Forward of Decomposable Attention layer
def hybrid_forward(self, F, a, b):
"""
Forward of Decomposable Attention layer
"""
# a.shape = [B, L1, H]
# b.shape = [B, L2, H]
# extract features
tilde_a = self.f(a) # shape = [B, L1, H]
tilde_b = self.f(b) # sh... |
r"""Counts tokens in the specified string.
For token_delim='(td)' and seq_delim='(sd)', a specified string of two sequences of tokens may
look like::
(td)token1(td)token2(td)token3(td)(sd)(td)token4(td)token5(td)(sd)
Parameters
----------
tokens : list of str
A source list of tok... |
Slice a flat sequence of tokens into sequences of tokens, with each
inner sequence's length equal to the specified `length`, taking into account the requested
sequence overlap.
Parameters
----------
sequence : list of object
A flat list of tokens.
length : int
The length of each of... |
Calculate the padding length needed for sliced samples in order not to discard data.
Parameters
----------
num_items : int
Number of items in dataset before collating.
length : int
The length of each of the samples.
overlap : int, default 0
The extra number of items in curre... |
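A sketch of that padding calculation, assuming the first slice is full and each later slice advances by `length - overlap`; this is an illustration of the arithmetic, not the library function.

```python
def padding_length(num_items, length, overlap=0):
    """Sketch: extra items needed so slices of `length` with `overlap`
    cover num_items exactly."""
    step = length - overlap
    remainder = (num_items - length) % step
    return 0 if remainder == 0 else step - remainder

assert padding_length(10, 4, overlap=1) == 0   # slices start at 0, 3, 6
assert padding_length(11, 4, overlap=1) == 2   # pad to 13 = 4 + 3 * 3
```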
Split the dataset into training and validation sets.
Parameters
----------
dataset : list
A list of training samples.
valid_ratio : float, default 0.05
Proportion of training samples to use for validation set
range: [0, 1]
Returns
-------
train : SimpleDataset
v... |
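A minimal sketch of such a split on a plain list of samples; the real helper wraps the results in `SimpleDataset` objects, which this sketch omits.

```python
import random

def train_valid_split(dataset, valid_ratio=0.05):
    """Sketch: shuffle and split a list of samples into train/valid parts."""
    samples = list(dataset)
    random.shuffle(samples)
    num_valid = int(len(samples) * valid_ratio)
    return samples[num_valid:], samples[:num_valid]
```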
Load the accompanying vocabulary object for pre-trained model.
Parameters
----------
name : str
Name of the vocabulary, usually the name of the dataset.
root : str, default '$MXNET_HOME/models'
Location for keeping the model parameters.
MXNET_HOME defaults to '~/.mxnet'.
cls... |
Extract archive file
Parameters
----------
file : str
Absolute path of the archive file.
target_dir : str
Target directory of the archive to be uncompressed
def _extract_archive(file, target_dir):
"""Extract archive file
Parameters
----------
file : str
Absolut... |
Discards tokens with frequency below min_frequency and represents them
as `unknown_token`.
Parameters
----------
min_freq: int
Tokens whose frequency is under min_freq are counted as `unknown_token` in
the returned Counter.
unknown_token: str
T... |
Training function.
def train():
"""Training function."""
trainer = gluon.Trainer(model.collect_params(), args.optimizer,
{'learning_rate': args.lr, 'beta2': 0.98, 'epsilon': 1e-9})
train_data_loader, val_data_loader, test_data_loader \
= dataprocessor.make_dataloader(da... |
r"""
Forward computation for highway layer
Parameters
----------
inputs: NDArray
The input tensor is of shape `(..., input_size)`.
Returns
----------
outputs: NDArray
The output tensor is of the same shape with input tensor `(..., input_s... |
Generate the unnormalized score for the given input sequences.
Parameters
----------
inputs : NDArray, shape (batch_size, seq_length)
Input words for the sequences.
token_types : NDArray, shape (batch_size, seq_length)
Token types for the sequences, used to i... |
Defines the forward computation for cache cell. Arguments can be either
:py:class:`NDArray` or :py:class:`Symbol`.
Parameters
----------
inputs : NDArray
The input data layout='TNC'.
states : Tuple[List[List[NDArray]]]
The states, including:
s... |
Read tokens from the provided parse tree in the SNLI dataset.
Illegal examples are removed.
def main(args):
"""
Read tokens from the provided parse tree in the SNLI dataset.
Illegal examples are removed.
"""
examples = []
with open(args.input, 'r') as fin:
reader = csv.DictReader(fi... |
one iteration of k-means
def _recenter(self):
"""
one iteration of k-means
"""
for split_idx in range(len(self._splits)):
split = self._splits[split_idx]
len_idx = self._split2len_idx[split]
if split == self._splits[-1]:
continue
... |
Index every sentence into a cluster
def _reindex(self):
"""
Index every sentence into a cluster
"""
self._len2split_idx = {}
last_split = -1
for split_idx, split in enumerate(self._splits):
self._len2split_idx.update(
dict(list(zip(list(range(... |
Build a pair of GNMT encoder/decoder
Parameters
----------
cell_type : str or type
attention_cell : str or AttentionCell
num_layers : int
num_bi_layers : int
hidden_size : int
dropout : float
use_residual : bool
i2h_weight_initializer : mx.init.Initializer or None
h2h_weight... |
Initialize the state from the encoder outputs.
Parameters
----------
encoder_outputs : list
encoder_valid_length : NDArray or None
Returns
-------
decoder_states : list
The decoder states, includes:
- rnn_states : NDArray
- a... |
Decode the decoder inputs. This function is only used for training.
Parameters
----------
inputs : NDArray, Shape (batch_size, length, C_in)
states : list of NDArrays or None
Initial states. The list of initial decoder states
valid_length : NDArray or None
... |
Transform instance to inputs for MLM and NSP.
def transform(instance, tokenizer, max_seq_length, max_predictions_per_seq, do_pad=True):
"""Transform instance to inputs for MLM and NSP."""
pad = tokenizer.convert_tokens_to_ids(['[PAD]'])[0]
input_ids = tokenizer.convert_tokens_to_ids(instance.tokens)
in... |
Write to numpy files from `TrainingInstance`s.
def write_to_files_np(features, tokenizer, max_seq_length,
max_predictions_per_seq, output_files):
# pylint: disable=unused-argument
"""Write to numpy files from `TrainingInstance`s."""
next_sentence_labels = []
valid_lengths = []
... |
Create IndexedRecordIO files from `TrainingInstance`s.
def write_to_files_rec(instances, tokenizer, max_seq_length,
max_predictions_per_seq, output_files):
"""Create IndexedRecordIO files from `TrainingInstance`s."""
writers = []
for output_file in output_files:
writers.appen... |
Create `TrainingInstance`s from raw text.
def create_training_instances(x):
"""Create `TrainingInstance`s from raw text."""
(input_files, out, tokenizer, max_seq_length, dupe_factor,
short_seq_prob, masked_lm_prob, max_predictions_per_seq, rng) = x
time_start = time.time()
logging.info('Processing... |
Creates `TrainingInstance`s for a single document.
def create_instances_from_document(
all_documents, document_index, max_seq_length, short_seq_prob,
masked_lm_prob, max_predictions_per_seq, vocab_words, rng):
"""Creates `TrainingInstance`s for a single document."""
document = all_documents[doc... |
Creates the predictions for the masked LM objective.
def create_masked_lm_predictions(tokens, masked_lm_prob,
max_predictions_per_seq, vocab_words, rng):
"""Creates the predictions for the masked LM objective."""
cand_indexes = []
for (i, token) in enumerate(tokens):
... |
Truncates a pair of sequences to a maximum sequence length.
def truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng):
"""Truncates a pair of sequences to a maximum sequence length."""
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_num_tokens:
b... |
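A sketch of the usual truncation loop, trimming the longer sequence and dropping from the front or back at random; this mirrors the common BERT preprocessing but is written here for illustration.

```python
import random

def truncate_pair(tokens_a, tokens_b, max_num_tokens, rng=random):
    """Sketch: trim the longer token list until the pair fits."""
    while len(tokens_a) + len(tokens_b) > max_num_tokens:
        longer = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b
        if rng.random() < 0.5:
            del longer[0]   # drop from the front
        else:
            longer.pop()    # drop from the back
```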
Main function.
def main():
"""Main function."""
time_start = time.time()
logging.info('loading vocab file from dataset: %s', args.vocab)
vocab_obj = nlp.data.utils._load_pretrained_vocab(args.vocab)
tokenizer = BERTTokenizer(
vocab=vocab_obj, lower='uncased' in args.vocab)
input_files ... |
GluonNLP specific code to convert the original vocabulary to nlp.vocab.BERTVocab.
def convert_vocab(vocab_file):
"""GluonNLP specific code to convert the original vocabulary to nlp.vocab.BERTVocab."""
original_vocab = load_vocab(vocab_file)
token_to_idx = dict(original_vocab)
num_tokens = len(token_to_... |
read tensorflow checkpoint
def read_tf_checkpoint(path):
"""read tensorflow checkpoint"""
from tensorflow.python import pywrap_tensorflow
tensors = {}
reader = pywrap_tensorflow.NewCheckpointReader(path)
var_to_shape_map = reader.get_variable_to_shape_map()
for key in sorted(var_to_shape_map):
... |
profile the program between [start_step, end_step).
def profile(curr_step, start_step, end_step, profile_name='profile.json',
early_exit=True):
"""profile the program between [start_step, end_step)."""
if curr_step == start_step:
mx.nd.waitall()
mx.profiler.set_config(profile_memory... |