Dataset preview: a single `text` column (string lengths range from 81 to 112k characters). Each row below is one sample, shown truncated.
client wrapper for kernels_pull
def kernels_pull_cli(self,
kernel,
kernel_opt=None,
path=None,
metadata=False):
""" client wrapper for kernels_pull
"""
kernel = kernel or kernel_opt
effec... |
retrieve output for a specified kernel
Parameters
==========
kernel: the kernel to output
path: the path to pull files to on the filesystem
force: if output already exists, force overwrite (default False)
quiet: suppress verbosity (default is True... |
client wrapper for kernels_output, with same arguments. Extra
arguments are described below, and see kernels_output for others.
Parameters
==========
kernel_opt: option from client instead of kernel, if not defined
def kernels_output_cli(self,
... |
call to the api to get the status of a kernel.
Parameters
==========
kernel: the kernel to get the status for
def kernels_status(self, kernel):
""" call to the api to get the status of a kernel.
Parameters
==========
kernel: the kernel t... |
client wrapper for kernel_status
Parameters
==========
kernel_opt: additional option from the client, if kernel not defined
def kernels_status_cli(self, kernel, kernel_opt=None):
""" client wrapper for kernel_status
Parameters
==========
... |
determine if a download is needed based on timestamp. Return True
if needed (remote is newer) or False if local is newest.
Parameters
==========
response: the response from the API
outfile: the output file to write to
quiet: suppress verbose outpu... |
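The row above documents a timestamp-based "download needed" check. A minimal stand-alone sketch of that idea, assuming the remote modification time has already been parsed out of the API response into a `datetime`:
```python
import os
from datetime import datetime

def download_needed(remote_last_modified, outfile):
    """Return True when the remote copy is newer than the local file.

    `remote_last_modified` is assumed to be a datetime parsed from the API
    response; `outfile` is the local path the file would be written to.
    """
    if not os.path.exists(outfile):
        return True  # nothing local yet, so a download is needed
    local_mtime = datetime.fromtimestamp(os.path.getmtime(outfile))
    return remote_last_modified > local_mtime
```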
print a table of items, for a set of fields defined
Parameters
==========
items: a list of items to print
fields: a list of fields to select from items
def print_table(self, items, fields):
""" print a table of items, for a set of fields defined
Par... |
print a set of fields in a set of items using a csv.writer
Parameters
==========
items: a list of items to print
fields: a list of fields to select from items
def print_csv(self, items, fields):
""" print a set of fields in a set of items using a csv.writer
... |
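A small stand-alone sketch of the `print_csv` behaviour described above, assuming each item exposes the selected fields as attributes:
```python
import csv
import sys

def print_csv(items, fields):
    """Write the selected fields of each item as CSV rows on stdout."""
    writer = csv.writer(sys.stdout)
    writer.writerow(fields)                       # header row
    for item in items:
        # assumption: items are simple objects with the fields as attributes
        writer.writerow([getattr(item, f, '') for f in fields])
```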
process a response from the API. We check the API version against
the client's to see if it's old, and give them a warning (once)
Parameters
==========
result: the result from the API
def process_response(self, result):
""" process a response from the API. We ch... |
determine if a client (on the local user's machine) is up to date
with the version provided on the server. Return a boolean with True
or False
Parameters
==========
server_version: the server version string to compare to the host
def is_up_to_date(self, serv... |
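The `is_up_to_date` row above compares the local client version against the server's. One hedged way to make such a comparison, assuming purely numeric dotted version strings:
```python
def is_up_to_date(client_version, server_version):
    """Return True when the local client is at least as new as the server version."""
    def as_tuple(version):
        # '1.5.12' -> (1, 5, 12); assumes purely numeric dotted versions
        return tuple(int(part) for part in version.split('.'))
    return as_tuple(client_version) >= as_tuple(server_version)
```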
upload files in a folder
Parameters
==========
request: the prepared request
resources: the files to upload
folder: the folder to upload from
quiet: suppress verbose output (default is False)
def upload_files(self,
request,
... |
Helper function to upload a single file
Parameters
==========
file_name: name of the file to upload
full_path: path to the file to upload
request: the prepared request
resources: optional file metadata
quiet: suppress verbose output
... |
process a column, check for the type, and return the processed
column
Parameters
==========
column: a list of values in a column to be processed
def process_column(self, column):
""" process a column, check for the type, and return the processed
colu... |
function to complete an upload to retrieve a path from a url
Parameters
==========
path: the path for the upload that is read in
url: the url to send the POST to
quiet: suppress verbose output (default is False)
def upload_complete(self, path, url, quiet):
... |
determine if a dataset string is valid, meaning it is in the format
of {username}/{dataset-slug}.
Parameters
==========
dataset: the dataset name to validate
def validate_dataset_string(self, dataset):
""" determine if a dataset string is valid, meaning it is in... |
determine if a kernel string is valid, meaning it is in the format
of {username}/{kernel-slug}.
Parameters
==========
kernel: the kernel name to validate
def validate_kernel_string(self, kernel):
""" determine if a kernel string is valid, meaning it is in the fo... |
validate resources is a wrapper to validate the existence of files
and that there are no duplicates for a folder and set of resources.
Parameters
==========
folder: the folder to validate
resources: one or more resources to validate within the folder
def val... |
ensure that one or more resource files exist in a folder
Parameters
==========
folder: the folder to validate
resources: one or more resources to validate within the folder
def validate_files_exist(self, folder, resources):
""" ensure that one or more resource f... |
ensure that the user has not provided duplicate paths in
a list of resources.
Parameters
==========
resources: one or more resources to validate not duplicated
def validate_no_duplicate_paths(self, resources):
""" ensure that the user has not provided duplicate ... |
convert a set of file_data to a metadata file at path
Parameters
==========
file_data: a dictionary of file data to write to file
path: the path to write the metadata to
def convert_to_dataset_file_metadata(self, file_data, path):
""" convert a set of file_data ... |
read the buffer, passing named and non named arguments to the
io.BufferedReader function.
def read(self, *args, **kwargs):
""" read the buffer, passing named and non named arguments to the
io.BufferedReader function.
"""
buf = io.BufferedReader.read(self, *args, **kwargs... |
Get parameters as list of tuples, formatting collections.
:param params: Parameters as dict or list of two-tuples
:param dict collection_formats: Parameter collection formats
:return: Parameters as list of tuples, collections formatted
def parameters_to_tuples(self, params, collection_formats)... |
Builds form parameters.
:param post_params: Normal form parameters.
:param files: File parameters.
:return: Form parameters with files.
def prepare_post_parameters(self, post_params=None, files=None):
"""Builds form parameters.
:param post_params: Normal form parameters.
... |
Deserializes body to file
Saves response body into a file in a temporary folder,
using the filename from the `Content-Disposition` header if provided.
:param response: RESTResponse.
:return: file path.
def __deserialize_file(self, response):
"""Deserializes body to file
... |
Deserializes string to primitive type.
:param data: str.
:param klass: class literal.
:return: int, long, float, str, bool.
def __deserialize_primitive(self, data, klass):
"""Deserializes string to primitive type.
:param data: str.
:param klass: class literal.
... |
The logger file.
If the logger_file is None, then add stream handler and remove file
handler. Otherwise, add file handler and remove stream handler.
:param value: The logger_file path.
:type: str
def logger_file(self, value):
"""The logger file.
If the logger_file is ... |
Sets the license_name of this DatasetNewRequest.
The license that should be associated with the dataset # noqa: E501
:param license_name: The license_name of this DatasetNewRequest. # noqa: E501
:type: str
def license_name(self, license_name):
"""Sets the license_name of this Datase... |
Train textCNN model for sentiment analysis.
def train(net, train_data, test_data):
"""Train textCNN model for sentiment analysis."""
start_pipeline_time = time.time()
net, trainer = text_cnn.init(net, vocab, args.model_mode, context, args.lr)
random.shuffle(train_data)
sp = int(len(train_data)*0.9)... |
Get tokens, tokens embedding
Parameters
----------
sentences : List[str]
sentences for encoding.
oov_way : str, default avg.
use **avg**, **sum** or **last** to get token embedding for those out of
vocabulary words
Returns
-------
... |
Load, tokenize and prepare the input sentences.
def data_loader(self, sentences, shuffle=False):
"""Load, tokenize and prepare the input sentences."""
dataset = BertEmbeddingDataset(sentences, self.transform)
return DataLoader(dataset=dataset, batch_size=self.batch_size, shuffle=shuffle) |
How to handle oov. Also filter out [CLS], [SEP] tokens.
Parameters
----------
batches : List[(tokens_id,
sequence_outputs,
pooled_output].
batch token_ids (max_seq_length, ),
sequence_outputs (max_seq_length, dim,... |
Any BERT pretrained model.
Parameters
----------
model_name : str or None, default None
Options include 'bert_24_1024_16' and 'bert_12_768_12'.
dataset_name : str or None, default None
Options include 'book_corpus_wiki_en_cased', 'book_corpus_wiki_en_uncased'
for both bert_24_10... |
forward computation.
def hybrid_forward(self, F, data, gamma, beta):
"""forward computation."""
# TODO(haibin): LayerNorm does not support fp16 safe reduction. Issue is tracked at:
# https://github.com/apache/incubator-mxnet/issues/14073
if self._dtype:
data = data.astype('f... |
Construct a decoder for the next sentence prediction task
def _get_classifier(self, prefix):
""" Construct a decoder for the next sentence prediction task """
with self.name_scope():
classifier = nn.Dense(2, prefix=prefix)
return classifier |
Construct a decoder for the masked language model task
def _get_decoder(self, units, vocab_size, embed, prefix):
""" Construct a decoder for the masked language model task """
with self.name_scope():
decoder = nn.HybridSequential(prefix=prefix)
decoder.add(nn.Dense(units, flatte... |
Construct an embedding block.
def _get_embed(self, embed, vocab_size, embed_size, initializer, dropout, prefix):
""" Construct an embedding block. """
if embed is None:
assert embed_size is not None, '"embed_size" cannot be None if "word_embed" or ' \
... |
Construct pooler.
The pooler slices and projects the hidden output of first token
in the sequence for segment level classification.
def _get_pooler(self, units, prefix):
""" Construct pooler.
The pooler slices and projects the hidden output of first token
in the sequence for s... |
Generate the representation given the input sequences.
This is used for pre-training or fine-tuning a BERT model.
def _encode_sequence(self, inputs, token_types, valid_length=None):
"""Generate the representation given the input sequences.
This is used for pre-training or fine-tuning a BERT m... |
Generate unnormalized prediction for the masked language model task.
This is only used for pre-training the BERT model.
Inputs:
- **sequence**: input tensor of sequence encodings.
Shape (batch_size, seq_length, units).
- **masked_positions**: input tensor of posit... |
Extracts n-grams from an input segment.
Parameters
----------
segment: list
Text segment from which n-grams will be extracted.
n: int
Order of n-gram.
Returns
-------
ngram_counts: Counter
Contain all the nth n-grams in segment with a count of how many times each n-... |
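The n-gram extraction described above can be expressed compactly with `collections.Counter`; a short illustrative version:
```python
from collections import Counter

def extract_ngrams(segment, n):
    """Count the order-n n-grams in a tokenized segment."""
    return Counter(tuple(segment[i:i + n]) for i in range(len(segment) - n + 1))

print(extract_ngrams(['the', 'cat', 'sat', 'on', 'the', 'mat'], 2))
# Counter({('the', 'cat'): 1, ('cat', 'sat'): 1, ('sat', 'on'): 1, ...})
```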
Convert a sequence of bpe words into sentence.
def _bpe_to_words(sentence, delimiter='@@'):
"""Convert a sequence of bpe words into sentence."""
words = []
word = ''
delimiter_len = len(delimiter)
for subwords in sentence:
if len(subwords) >= delimiter_len and subwords[-delimiter_len:] == d... |
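The `_bpe_to_words` row above is cut off; a self-contained sketch of the same merge-on-delimiter idea (not the library's exact code):
```python
def bpe_to_words(sentence, delimiter='@@'):
    """Merge BPE sub-word tokens back into full words.

    Tokens ending with the delimiter (e.g. 'go@@') are glued to the
    following token, so ['go@@', 'ing', 'home'] becomes ['going', 'home'].
    """
    words, word = [], ''
    for subword in sentence:
        if subword.endswith(delimiter):
            word += subword[:-len(delimiter)]
        else:
            words.append(word + subword)
            word = ''
    return words

print(bpe_to_words(['go@@', 'ing', 'home']))  # ['going', 'home']
```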
r"""
Tokenizes a string following the tokenizer in mteval-v13a.pl.
See https://github.com/moses-smt/mosesdecoder/"
"blob/master/scripts/generic/mteval-v14.pl#L917-L942
Parameters
----------
segment: str
A string to be tokenized
Returns
-------
The tokenized string
de... |
r"""Tokenize a string following following the international tokenizer in mteval-v14a.pl.
See https://github.com/moses-smt/mosesdecoder/"
"blob/master/scripts/generic/mteval-v14.pl#L954-L983
Parameters
----------
segment: str
A string to be tokenized
Returns
-------
The t... |
r"""Compute bleu score of translation against references.
Parameters
----------
reference_corpus_list: list of list(list(str)) or list of list(str)
list of list(list(str)): tokenized references
list of list(str): plain text
List of references for each translation.
translation_co... |
Compute ngram precision.
Parameters
----------
references: list(list(str))
A list of references.
translation: list(str)
A translation.
n: int
Order of n-gram.
Returns
-------
matches: int
Number of matched nth order n-grams
candidates
Number ... |
Calculate brevity penalty.
Parameters
----------
ref_length: int
Sum of all closest references' lengths for every translation in a corpus
trans_length: int
Sum of all translations' lengths in a corpus.
Returns
-------
bleu's brevity penalty: float
def _brevity_penalty(ref... |
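The brevity penalty row above follows the standard BLEU definition (penalize translations shorter than their references); a small sketch of that formula:
```python
import math

def brevity_penalty(ref_length, trans_length):
    """BLEU brevity penalty: 1 if the translation is long enough, else exp(1 - r/c)."""
    if trans_length == 0:
        return 0.0
    if trans_length >= ref_length:
        return 1.0
    return math.exp(1 - ref_length / trans_length)
```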
Find the reference that has the closest length to the translation.
Parameters
----------
references: list(list(str))
A list of references.
trans_length: int
Length of the translation.
Returns
-------
closest_ref_len: int
Length of the reference that is closest to th... |
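A compact sketch of the closest-reference-length lookup described above, breaking ties toward the shorter reference:
```python
def closest_ref_length(references, trans_length):
    """Pick the reference length closest to the translation length."""
    return min((len(ref) for ref in references),
               key=lambda ref_len: (abs(ref_len - trans_length), ref_len))
```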
Compute the smoothed precision for all the orders.
Parameters
----------
precision_fractions: list(tuple)
Contain a list of (precision_numerator, precision_denominator) pairs
c: int, default 1
Smoothing constant to use
Returns
-------
ratios: list of floats
Contain ... |
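A hedged sketch of one simple smoothing scheme consistent with the description above (add the constant to both numerator and denominator); the library's exact scheme may differ:
```python
def smoothing(precision_fractions, c=1):
    """Smooth (numerator, denominator) precision pairs by adding a constant to both.

    This keeps higher-order precisions from collapsing to zero when there
    are no n-gram matches.
    """
    return [(num + c) / (den + c) if den > 0 else 0.0
            for num, den in precision_fractions]
```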
Draw samples from log uniform distribution and returns sampled candidates,
expected count for true classes and sampled classes.
Parameters
----------
true_classes: NDArray
The true classes.
Returns
-------
samples: NDArray
The sampled can... |
Dataset preprocessing helper.
Parameters
----------
data : mx.data.Dataset
Input Dataset. For example gluonnlp.data.Text8 or gluonnlp.data.Fil9
min_freq : int, default 5
Minimum token frequency for a token to be included in the vocabulary
and returned DataStream.
max_vocab_s... |
Wikipedia dump helper.
Parameters
----------
wiki_root : str
Parameter for WikiDumpStream
wiki_date : str
Parameter for WikiDumpStream
wiki_language : str
Parameter for WikiDumpStream
max_vocab_size : int, optional
Specifies a maximum size for the vocabulary.
... |
Transform a DataStream of coded DataSets to a DataStream of batches.
Parameters
----------
data : gluonnlp.data.DataStream
DataStream where each sample is a valid input to
gluonnlp.data.EmbeddingCenterContextBatchify.
vocab : gluonnlp.Vocab
Vocabulary containing all tokens whose... |
Transform a DataStream of coded DataSets to a DataStream of batches.
Parameters
----------
data : gluonnlp.data.DataStream
DataStream where each sample is a valid input to
gluonnlp.data.EmbeddingCenterContextBatchify.
vocab : gluonnlp.Vocab
Vocabulary containing all tokens whose... |
Create a batch for CBOW training objective with subwords.
def cbow_fasttext_batch(centers, contexts, num_tokens, subword_lookup, dtype,
index_dtype):
"""Create a batch for CBOW training objective with subwords."""
_, contexts_row, contexts_col = contexts
data, row, col = subword_loo... |
Create a batch for SG training objective with subwords.
def skipgram_fasttext_batch(centers, contexts, num_tokens, subword_lookup,
dtype, index_dtype):
"""Create a batch for SG training objective with subwords."""
contexts = mx.nd.array(contexts[2], dtype=index_dtype)
data, row,... |
Create a batch for CBOW training objective.
def cbow_batch(centers, contexts, num_tokens, dtype, index_dtype):
"""Create a batch for CBOW training objective."""
contexts_data, contexts_row, contexts_col = contexts
centers = mx.nd.array(centers, dtype=index_dtype)
contexts = mx.nd.sparse.csr_matrix(
... |
Create a batch for SG training objective.
def skipgram_batch(centers, contexts, num_tokens, dtype, index_dtype):
"""Create a batch for SG training objective."""
contexts = mx.nd.array(contexts[2], dtype=index_dtype)
indptr = mx.nd.arange(len(centers) + 1)
centers = mx.nd.array(centers, dtype=index_dtyp... |
Get a sparse COO array of words and subwords for SkipGram.
Parameters
----------
indices : numpy.ndarray
Array containing numbers in [0, vocabulary_size). The element at
position idx is taken to be the word that occurs at row idx in the
SkipGram batch.
offset : int
Offse... |
Get a sparse COO array of words and subwords for CBOW.
Parameters
----------
context_row : numpy.ndarray of dtype int64
Array of same length as context_col containing numbers in [0,
batch_size). For each idx, context_row[idx] specifies the row that
context_col[idx] occurs in a spars... |
Source Vocabulary of the Dataset.
Returns
-------
src_vocab : Vocab
Source vocabulary.
def src_vocab(self):
"""Source Vocabulary of the Dataset.
Returns
-------
src_vocab : Vocab
Source vocabulary.
"""
if self._src_vocab ... |
Target Vocabulary of the Dataset.
Returns
-------
tgt_vocab : Vocab
Target vocabulary.
def tgt_vocab(self):
"""Target Vocabulary of the Dataset.
Returns
-------
tgt_vocab : Vocab
Target vocabulary.
"""
if self._tgt_vocab ... |
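The `src_vocab`/`tgt_vocab` rows above both follow a lazy-property pattern: load the vocabulary on first access, then reuse the cached object. A minimal sketch (the `load_vocab` helper is hypothetical):
```python
class TranslationDataset:
    """Minimal sketch of the lazy vocabulary properties described above."""

    def __init__(self, src_vocab_path, tgt_vocab_path):
        self._src_vocab_path = src_vocab_path
        self._tgt_vocab_path = tgt_vocab_path
        self._src_vocab = None
        self._tgt_vocab = None

    @property
    def src_vocab(self):
        # Load once, then return the cached Vocab on later accesses.
        if self._src_vocab is None:
            self._src_vocab = load_vocab(self._src_vocab_path)  # hypothetical loader
        return self._src_vocab

    @property
    def tgt_vocab(self):
        if self._tgt_vocab is None:
            self._tgt_vocab = load_vocab(self._tgt_vocab_path)
        return self._tgt_vocab
```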
Evaluate given the data loader
Parameters
----------
data_loader : DataLoader
Returns
-------
avg_loss : float
Average loss
real_translation_out : list of list of str
The translation output
def evaluate(data_loader):
"""Evaluate given the data loader
Parameters
... |
Training function.
def train():
"""Training function."""
trainer = gluon.Trainer(model.collect_params(), args.optimizer, {'learning_rate': args.lr})
train_data_loader, val_data_loader, test_data_loader \
= dataprocessor.make_dataloader(data_train, data_val, data_test, args)
best_valid_bleu = ... |
r"""Returns a cache model using a pre-trained language model.
We implement the neural cache language model proposed in the following work::
@article{grave2016improving,
title={Improving neural language models with a continuous cache},
author={Grave, Edouard and Joulin, Armand and Usunier, ... |
Training helper.
def train(args):
"""Training helper."""
if not args.model.lower() in ['cbow', 'skipgram']:
logging.error('Unsupported model %s.', args.model)
sys.exit(1)
if args.data.lower() == 'toy':
data = mx.gluon.data.SimpleDataset(nlp.data.Text8(segment='train')[:2])
... |
Evaluation helper
def evaluate(args, embedding, vocab, global_step, eval_analogy=False):
"""Evaluation helper"""
if 'eval_tokens' not in globals():
global eval_tokens
eval_tokens_set = evaluation.get_tokens_in_evaluation_datasets(args)
if not args.no_eval_analogy:
eval_toke... |
Return the dataset corresponds to the provided key.
Example::
a = np.ones((2,2))
b = np.zeros((2,2))
np.savez('data.npz', a=a, b=b)
dataset = NumpyDataset('data.npz')
data_a = dataset.get_field('a')
data_b = dataset.get_field('b')
... |
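The `get_field` example above can be reproduced with a tiny `.npz`-backed class; an illustrative sketch, not the library's implementation:
```python
import numpy as np

class NumpyDataset:
    """Tiny sketch of a .npz-backed dataset with named-field access."""

    def __init__(self, filename):
        self._data = np.load(filename)

    def get_field(self, field):
        # Return the array stored under `field` in the .npz archive.
        return self._data[field]

np.savez('data.npz', a=np.ones((2, 2)), b=np.zeros((2, 2)))
dataset = NumpyDataset('data.npz')
print(dataset.get_field('a'))
```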
Project the tokenized prediction back to the original text.
def get_final_text(pred_text, orig_text, tokenizer):
"""Project the tokenized prediction back to the original text."""
# When we created the data, we kept track of the alignment between original
# (whitespace tokenized) tokens and our WordPiece t... |
Get prediction results
Parameters
----------
dev_dataset: dataset
Examples of transform.
all_results: dict
A dictionary containing model prediction results.
tokenizer: callable
Tokenizer function.
max_answer_length: int, default 64
Maximum length of the answer to... |
Calculate the F1 and EM scores of the predicted results.
Use only with the SQuAD1.1 dataset.
Parameters
----------
dataset_file: string
Path to the data file.
predict_data: dict
All final predictions.
Returns
-------
scores: dict
F1 and EM scores.
def get_F1_EM... |
Data preparation function.
def preprocess_data(tokenizer, task, batch_size, dev_batch_size, max_len, pad=False):
"""Data preparation function."""
# transformation
trans = BERTDatasetTransform(
tokenizer,
max_len,
labels=task.get_labels(),
pad=pad,
pair=task.is_pair,
... |
Evaluate the model on validation dataset.
def evaluate(dataloader_eval, metric):
"""Evaluate the model on validation dataset.
"""
metric.reset()
for _, seqs in enumerate(dataloader_eval):
input_ids, valid_len, type_ids, label = seqs
out = model(
input_ids.as_in_context(ctx),... |
Generate and print out the log message for training.
def log_train(batch_id, batch_num, metric, step_loss, log_interval, epoch_id, learning_rate):
"""Generate and print out the log message for training.
"""
metric_nm, metric_val = metric.get()
if not isinstance(metric_nm, list):
metric_nm = [me... |
Generate and print out the log message for inference.
def log_inference(batch_id, batch_num, metric, step_loss, log_interval):
"""Generate and print out the log message for inference.
"""
metric_nm, metric_val = metric.get()
if not isinstance(metric_nm, list):
metric_nm = [metric_nm]
me... |
Training function.
def train(metric):
"""Training function."""
logging.info('Now we are doing BERT classification training on %s!', ctx)
optimizer_params = {'learning_rate': lr, 'epsilon': epsilon, 'wd': 0.01}
try:
trainer = gluon.Trainer(
model.collect_params(),
args.o... |
Inference function.
def inference(metric):
"""Inference function."""
logging.info('Now we are doing BERT classification inference on %s!', ctx)
model = BERTClassifier(bert, dropout=0.1, num_classes=len(task.get_labels()))
model.hybridize(static_alloc=True)
model.load_parameters(model_parameters, c... |
Process SQuAD dataset by creating NDArray version of data
:param Dataset dataset: SQuAD dataset
:param int question_max_length: Maximum length of question (padded or trimmed to that size)
:param int context_max_length: Maximum length of context (padded or trimmed to that size)
Returns
-------
... |
Find all answer spans from the context, returning start_index and end_index
:param list[str] answer_list: List of all answers
:param list[int] answer_start_list: List of all answers' start indices
Returns
-------
List[Tuple]
list of Tuple(answer_start_index answer_e... |
Provides word level vocabulary
Returns
-------
Vocab
Word level vocabulary
def get_word_level_vocab(self):
"""Provides word level vocabulary
Returns
-------
Vocab
Word level vocabulary
"""
def simple_tokenize(source_str,... |
Parameters
----------
states : list
the stack outputs from RNN, which consists of output from each time step (TNC).
Returns
--------
loss : NDArray
loss tensor with shape (batch_size,). Dimensions other than batch_axis are averaged out.
def hybrid_forwar... |
Tokenizes a piece of text.
def _tokenize(self, text):
"""Tokenizes a piece of text."""
text = self._clean_text(text)
# This was added on November 1st, 2018 for the multilingual and Chinese
# models. This is also applied to the English models now, but it doesn't
# matter since t... |
Performs invalid character removal and whitespace cleanup on text.
def _clean_text(self, text):
"""Performs invalid character removal and whitespace cleanup on text."""
output = []
for char in text:
cp = ord(char)
if cp in (0, 0xfffd) or self._is_control(char):
... |
Checks whether `chars` is a control character.
def _is_control(self, char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char in ['\t', '\n', '\r']:
return False
cat =... |
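A self-contained version of the control-character test described above, using `unicodedata` categories:
```python
import unicodedata

def is_control(char):
    """Tab/newline/carriage-return count as whitespace; anything in a
    Unicode 'C*' category counts as a control character."""
    if char in ('\t', '\n', '\r'):
        return False
    return unicodedata.category(char).startswith('C')
```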
Splits punctuation on a piece of text.
def _run_split_on_punc(self, text):
"""Splits punctuation on a piece of text."""
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
if self._is_punctuation(char):... |
Checks whether `chars` is a punctuation character.
def _is_punctuation(self, char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punc... |
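Likewise for the punctuation test described above: non-letter/number ASCII plus anything in a Unicode 'P*' category. A sketch:
```python
import unicodedata

def is_punctuation(char):
    """Treat non-letter/number ASCII and Unicode 'P*' characters as punctuation."""
    cp = ord(char)
    if (33 <= cp <= 47) or (58 <= cp <= 64) or (91 <= cp <= 96) or (123 <= cp <= 126):
        return True
    return unicodedata.category(char).startswith('P')
```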
Checks whether `chars` is a whitespace character.
def _is_whitespace(self, char):
"""Checks whether `chars` is a whitespace character."""
# \t, \n, and \r are technically control characters but we treat them
# as whitespace since they are generally considered as such.
if char in [' ', '... |
Runs basic whitespace cleaning and splitting on a piece of text.
def _whitespace_tokenize(self, text):
"""Runs basic whitespace cleaning and splitting on a piece of text."""
text = text.strip()
tokens = text.split()
return tokens |
Tokenizes a piece of text into its word pieces.
This uses a greedy longest-match-first algorithm to perform tokenization
using the given vocabulary.
For example:
input = "unaffable"
output = ["un", "##aff", "##able"]
Args:
text: A single token or whitespa... |
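The WordPiece row above describes a greedy longest-match-first split; a small illustrative implementation over a toy vocabulary (the function and parameter names here are illustrative, not the library's):
```python
def wordpiece_tokenize(token, vocab, unk_token='[UNK]', max_chars=100):
    """Greedy longest-match-first WordPiece split of a single token."""
    if len(token) > max_chars:
        return [unk_token]
    pieces, start = [], 0
    while start < len(token):
        end, cur_piece = len(token), None
        while start < end:
            piece = token[start:end]
            if start > 0:
                piece = '##' + piece      # continuation pieces carry the '##' prefix
            if piece in vocab:
                cur_piece = piece
                break
            end -= 1
        if cur_piece is None:
            return [unk_token]            # no piece matched, give up on the token
        pieces.append(cur_piece)
        start = end
    return pieces

vocab = {'un', '##aff', '##able'}
print(wordpiece_tokenize('unaffable', vocab))  # ['un', '##aff', '##able']
```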
Truncates a sequence pair in place to the maximum length.
def _truncate_seq_pair(self, tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes m... |
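The truncation heuristic above (always drop a token from the currently longer sequence) in a few lines:
```python
def truncate_seq_pair(tokens_a, tokens_b, max_length):
    """Trim the pair in place until their combined length fits `max_length`."""
    while len(tokens_a) + len(tokens_b) > max_length:
        longer = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b
        longer.pop()
```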
Construct the argument parser.
def get_args():
"""Construct the argument parser."""
parser = argparse.ArgumentParser(
description='Word embedding evaluation with Gluon.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# Embeddings arguments
group = parser.add_argument_group('E... |
Validate provided arguments and act on --help.
def validate_args(args):
"""Validate provided arguments and act on --help."""
if args.list_embedding_sources:
print('Listing all sources for {} embeddings.'.format(
args.embedding_name))
print('Specify --embedding-name if you wish to '
... |
Load a TokenEmbedding.
def load_embedding_from_path(args):
"""Load a TokenEmbedding."""
if args.embedding_path.endswith('.bin'):
with utils.print_time('load fastText model.'):
model = \
nlp.model.train.FasttextEmbeddingModel.load_fasttext_format(
args.emb... |
Calculate the 2-norm of gradients of parameters, and how much they should be scaled down
such that their 2-norm does not exceed `max_norm`.
If gradients exist for more than one context for a parameter, user needs to explicitly call
``trainer.allreduce_grads`` so that the gradients are summed first before c... |
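The row above describes computing the combined 2-norm of all gradients and the factor to scale them down by. A framework-agnostic NumPy sketch of that arithmetic (the real helper operates on MXNet parameters and contexts):
```python
import numpy as np

def grad_global_norm_and_scale(grads, max_norm):
    """Return the global 2-norm over a list of gradient arrays and the factor
    they should be multiplied by so the norm does not exceed `max_norm`."""
    total_norm = np.sqrt(sum(float((g ** 2).sum()) for g in grads))
    scale = max_norm / (total_norm + 1e-12)
    return total_norm, min(scale, 1.0)   # never scale gradients up

grads = [np.ones((2, 3)), np.full((4,), 2.0)]
norm, scale = grad_global_norm_and_scale(grads, max_norm=1.0)
print(norm, scale)
```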
backward propagation with loss
def backward(self, loss):
"""backward propagation with loss"""
with mx.autograd.record():
if isinstance(loss, (tuple, list)):
ls = [l * self._scaler.loss_scale for l in loss]
else:
ls = loss * self._scaler.loss_scale... |
Makes one step of parameter update. Should be called after
`fp16_optimizer.backward()`, and outside of `record()` scope.
Parameters
----------
batch_size : int
Batch size of data processed. Gradient will be normalized by `1/batch_size`.
Set this to 1 if you norma... |
detect inf and nan
def has_overflow(self, params):
""" detect inf and nan """
is_not_finite = 0
for param in params:
if param.grad_req != 'null':
grad = param.list_grad()[0]
is_not_finite += mx.nd.contrib.isnan(grad).sum()
is_not_finit... |
dynamically update loss scale
def update_scale(self, overflow):
"""dynamically update loss scale"""
iter_since_rescale = self._num_steps - self._last_rescale_iter
if overflow:
self._last_overflow_iter = self._num_steps
self._overflows_since_rescale += 1
perce... |
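A minimal, generic sketch of the dynamic loss-scaling idea described above (halve the scale on overflow, grow it after a quiet stretch); the excerpt suggests the actual implementation tracks an overflow percentage rather than this simple counter:
```python
class DynamicLossScaler:
    """Minimal sketch of dynamic loss scaling for mixed-precision training."""

    def __init__(self, init_scale=2.0 ** 16, scale_window=2000, factor=2.0):
        self.loss_scale = init_scale
        self.scale_window = scale_window
        self.factor = factor
        self._steps_since_rescale = 0

    def update_scale(self, overflow):
        if overflow:
            # Gradients hit inf/nan: shrink the scale and start counting again.
            self.loss_scale = max(self.loss_scale / self.factor, 1.0)
            self._steps_since_rescale = 0
        else:
            self._steps_since_rescale += 1
            if self._steps_since_rescale >= self.scale_window:
                # A long overflow-free stretch: try a larger scale.
                self.loss_scale *= self.factor
                self._steps_since_rescale = 0
```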
Return a string representing the statistics of the bucketing sampler.
Returns
-------
ret : str
String representing the statistics of the buckets.
def stats(self):
"""Return a string representing the statistics of the bucketing sampler.
Returns
-------
... |
Training loop for language model.
def train():
"""Training loop for language model.
"""
print(model)
from_epoch = 0
model.initialize(mx.init.Xavier(factor_type='out'), ctx=context)
trainer_params = {'learning_rate': args.lr, 'wd': 0, 'eps': args.eps}
trainer = gluon.Trainer(model.collect_pa... |
Evaluate loop for the trained model
def evaluate():
""" Evaluate loop for the trained model """
print(eval_model)
eval_model.initialize(mx.init.Xavier(), ctx=context[0])
eval_model.hybridize(static_alloc=True, static_shape=True)
epoch = args.from_epoch if args.from_epoch else 0
while epoch < ar... |