Load sentiment dataset.
def load_dataset(data_name):
"""Load sentiment dataset."""
if data_name == 'MR' or data_name == 'Subj':
train_dataset, output_size = _load_file(data_name)
vocab, max_len = _build_vocab(data_name, train_dataset, [])
train_dataset, train_data_lengths = _preprocess_... |
Get home directory for storing datasets/models/pre-trained word embeddings
def get_home_dir():
"""Get home directory for storing datasets/models/pre-trained word embeddings"""
_home_dir = os.environ.get('MXNET_HOME', os.path.join('~', '.mxnet'))
# expand ~ to actual path
_home_dir = os.path.expanduser(... |
Read dataset from tokenized files.
def read_dataset(args, dataset):
"""
Read dataset from tokenized files.
"""
path = os.path.join(vars(args)[dataset])
logger.info('reading data from {}'.format(path))
examples = [line.strip().split('\t') for line in open(path)]
if args.max_num_examples > 0:... |
Build vocab given a dataset.
def build_vocab(dataset):
"""
Build vocab given a dataset.
"""
counter = nlp.data.count_tokens([w for e in dataset for s in e[:2] for w in s],
to_lower=True)
vocab = nlp.Vocab(counter)
return vocab |
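A usage sketch for build_vocab; the toy sentence pairs below are hypothetical stand-ins for a real tokenized dataset of (sentence1, sentence2, label) triples:
import gluonnlp as nlp

# toy examples: (sentence1_tokens, sentence2_tokens, label)
dataset = [(['A', 'man', 'eats'], ['Someone', 'eats'], 'entailment'),
           (['A', 'dog', 'runs'], ['A', 'cat', 'sleeps'], 'contradiction')]
vocab = build_vocab(dataset)
print(vocab(['a', 'man', 'eats']))  # three integer token indices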
Read data and build data loader.
def prepare_data_loader(args, dataset, vocab, test=False):
"""
Read data and build data loader.
"""
# Preprocess
dataset = dataset.transform(lambda s1, s2, label: (vocab(s1), vocab(s2), label),
lazy=False)
# Batching
batchify... |
If a GPU is available return it, else CPU
Returns
-------
context : Context
The preferable GPU context.
def mxnet_prefer_gpu():
"""If gpu available return gpu, else cpu
Returns
-------
context : Context
The preferable GPU context.
"""
gpu = int(os.environ.get('MXNET_GP... |
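The body is cut off; a minimal sketch of the rest, assuming the MXNET_GPU environment-variable convention started above (mx.test_utils.list_gpus() is MXNet's stock GPU probe):
import os
import mxnet as mx

def mxnet_prefer_gpu():
    """If a GPU is available return it, else CPU."""
    gpu = int(os.environ.get('MXNET_GPU', 0))
    if gpu in mx.test_utils.list_gpus():
        return mx.gpu(gpu)
    return mx.cpu()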
Initialize a logger
Parameters
----------
root_dir : str
directory for saving log
name : str
name of logger
Returns
-------
logger : logging.Logger
a logger
def init_logger(root_dir, name="train.log"):
"""Initialize a logger
Parameters
----------
r... |
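The body is truncated; a plausible minimal implementation under the docstring above (the handler and level choices are assumptions):
import logging
import os

def init_logger(root_dir, name='train.log'):
    """Initialize a logger writing to both root_dir/name and the console."""
    os.makedirs(root_dir, exist_ok=True)
    logger = logging.getLogger(name)
    logger.setLevel(logging.INFO)
    logger.addHandler(logging.FileHandler(os.path.join(root_dir, name)))
    logger.addHandler(logging.StreamHandler())
    return logger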
Build a standard LSTM cell, with variational dropout,
with weights initialized to be orthonormal (https://arxiv.org/abs/1312.6120)
Parameters
----------
lstm_layers : int
Currently only supports one layer
input_dims : int
word vector dimensions
lstm_hiddens : int
hidden s... |
Feature extraction through BiLSTM
Parameters
----------
f_lstm : VariationalDropoutCell
Forward cell
b_lstm : VariationalDropoutCell
Backward cell
inputs : NDArray
seq_len x batch_size
dropout_x : float
Variational dropout on inputs
dropout_h :
Not us... |
Do xWy
Parameters
----------
x : NDArray
(input_size x seq_len) x batch_size
W : NDArray
(num_outputs x ny) x nx
y : NDArray
(input_size x seq_len) x batch_size
input_size : int
input dimension
seq_len : int
sequence length
batch_size : int
... |
adopted from Timothy Dozat https://github.com/tdozat/Parser/blob/master/lib/linalg.py
Parameters
----------
output_size : int
input_size : int
debug : bool
Whether to skip this initializer
Returns
-------
Q : np.ndarray
The orthonormal weight matrix of input_size x outpu... |
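The referenced Dozat code builds the matrix iteratively; as an illustrative stand-in (not the original scheme), an SVD-based sketch that yields an orthonormal factor:
import numpy as np

def orthonormal_matrix(output_size, input_size):
    # hypothetical helper: SVD of a random Gaussian gives an orthonormal factor
    w = np.random.randn(input_size, output_size)
    u, _, vt = np.linalg.svd(w, full_matrices=False)
    return u.dot(vt).astype(np.float32)  # input_size x output_size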
MST
Adopted from Timothy Dozat https://github.com/tdozat/Parser/blob/master/lib/models/nn.py
Parameters
----------
parse_probs : NDArray
seq_len x seq_len, the probability of arcs
length : NDArray
real sentence length
tokens_to_keep : NDArray
mask matrix
ensure_tree ... |
Fix the relation prediction by heuristic rules
Parameters
----------
rel_probs : NDArray
seq_len x rel_size
length :
real sentence length
ensure_tree :
whether to apply rules
Returns
-------
rel_preds : np.ndarray
prediction of relations of size (seq_len,... |
The missing Fortran reshape for mx.NDArray
Parameters
----------
tensor : NDArray
source tensor
shape : NDArray
desired shape
Returns
-------
output : NDArray
reordered result
def reshape_fortran(tensor, shape):
"""The missing Fortran reshape for mx.NDArray
... |
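Fortran (column-major) reshape reduces to the transpose/reshape/transpose trick, so the whole function is one line; a minimal sketch for mx.nd arrays (assuming shape arrives as a tuple or list):
import mxnet as mx

def reshape_fortran(tensor, shape):
    # reverse axes, C-order reshape against the reversed target shape, reverse back
    return tensor.T.reshape(tuple(reversed(shape))).T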
Updates the progress bar.
# Arguments
current: Index of current step.
values: List of tuples (name, value_for_last_step).
The progress bar will display averages for these values.
exact: List of tuples (name, value_for_last_step).
The progress b... |
Get mini-batches of the dataset.
Parameters
----------
data_source : NDArray
The dataset to draw mini-batches from.
i : int
The index of the batch, starting from 0.
seq_len : int
The length of each sample in the batch.
Returns
-------
data: NDArray
The context
... |
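The body is truncated; the standard word-LM batching it describes looks like this (bptt is a placeholder for the original args.bptt):
bptt = 35  # placeholder for args.bptt

def get_batch(data_source, i, seq_len=None):
    # context window and its one-token-shifted target
    seq_len = min(seq_len if seq_len else bptt, len(data_source) - 1 - i)
    data = data_source[i:i + seq_len]
    target = data_source[i + 1:i + 1 + seq_len]
    return data, target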
Evaluate the model on the dataset.
Parameters
----------
data_source : NDArray
The dataset the model is evaluated on.
batch_size : int
The size of the mini-batch.
params_file_name : str
The parameter file to use to evaluate,
e.g., val.params or args.save
ctx : mx.cpu() or ... |
Training loop for awd language model.
def train():
"""Training loop for awd language model.
"""
ntasgd = False
best_val = float('Inf')
start_train_time = time.time()
parameters = model.collect_params()
param_dict_avg = None
t = 0
avg_trigger = 0
n = 5
valid_losses = []
... |
Registers a new word embedding evaluation function.
Once registered, we can create an instance with
:func:`~gluonnlp.embedding.evaluation.create`.
Examples
--------
>>> @gluonnlp.embedding.evaluation.register
... class MySimilarityFunction(gluonnlp.embedding.evaluation.WordEmbeddingSimilarityF... |
Creates an instance of a registered word embedding evaluation function.
Parameters
----------
kind : ['similarity', 'analogy']
Return only valid names for similarity, analogy or both kinds of
functions.
name : str
The evaluation function name (case-insensitive).
Returns
... |
Get valid word embedding function names.
Parameters
----------
kind : ['similarity', 'analogy', None]
Return only valid names for similarity, analogy or both kinds of functions.
Returns
-------
dict or list:
A list of all the valid evaluation function names for the specified
... |
Predict the similarity of words1 and words2.
Parameters
----------
words1 : Symbol or NDArray
The indices of the words we wish to compare to the words in words2.
words2 : Symbol or NDArray
The indices of the words we wish to compare to the words in words1... |
Compute analogies for given question words.
Parameters
----------
words1 : Symbol or NDArray
Word indices of first question words. Shape (batch_size, ).
words2 : Symbol or NDArray
Word indices of second question words. Shape (batch_size, ).
words3 : Symbo... |
Evaluate the model on the dataset with cache model.
Parameters
----------
data_source : NDArray
The dataset the model is evaluated on.
batch_size : int
The size of the mini-batch.
ctx : mx.cpu() or mx.gpu()
The context of the computation.
Returns
-------
loss: float
... |
Returns a pre-defined model by name.
Parameters
----------
name : str
Name of the model.
dataset_name : str or None, default 'wikitext-2'.
If None, then vocab is required, for specifying embedding weight size, and is directly
returned.
vocab : gluonnlp.Vocab or None, default... |
Static BERT BASE model.
The number of layers (L) is 12, number of units (H) is 768, and the
number of self-attention heads (A) is 12.
Parameters
----------
dataset_name : str or None, default None
Options include 'book_corpus_wiki_en_cased', 'book_corpus_wiki_en_uncased',
'wiki_cn_... |
Generate the representation given the inputs.
This is used in training or fine-tuning a static (hybridized) BERT model.
def hybrid_forward(self, F, inputs, token_types, valid_length=None, masked_positions=None):
# pylint: disable=arguments-differ
# pylint: disable=unused-argument
"""Ge... |
Load parameters from file.
filename : str
Path to parameter file.
ctx : Context or list of Context, default cpu()
Context(s) to initialize loaded parameters on.
def load_parameters(self, filename, ctx=mx.cpu()): # pylint: disable=arguments-differ
"""Load parameters from fi... |
Defines the forward computation for cache cell. Arguments can be either
:py:class:`NDArray` or :py:class:`Symbol`.
Parameters
----------
inputs: NDArray
The input data
target: NDArray
The label
next_word_history: NDArray
The next word ... |
Assign input `x` to an available worker and invoke
`parallizable.forward_backward` with x.
def put(self, x):
"""Assign input `x` to an available worker and invoke
`parallizable.forward_backward` with x. """
if self._num_serial > 0 or len(self._threads) == 0:
self._num_serial... |
Deserialize BERTVocab object from json string.
Parameters
----------
json_str : str
Serialized json string of a BERTVocab object.
Returns
-------
BERTVocab
def from_json(cls, json_str):
"""Deserialize BERTVocab object from json string.
Para... |
Defines the forward computation. Arguments can be either
:py:class:`NDArray` or :py:class:`Symbol`.
Parameters
----------
inputs : NDArray
input tensor with shape `(sequence_length, batch_size)`
when `layout` is "TNC".
begin_state : list
init... |
Defines the forward computation.
Parameters
----------
inputs : NDArray
input tensor with shape `(sequence_length, batch_size)`
when `layout` is "TNC".
begin_state : list
initial recurrent state tensor with length equals to num_layers*2.
... |
SkipGram forward pass.
Parameters
----------
center : mxnet.nd.NDArray or mxnet.sym.Symbol
Sparse CSR array of word / subword indices of shape (batch_size,
len(token_to_idx) + num_subwords). Embeddings for center words are
computed via F.sparse.dot between the... |
Evaluate network on the specified dataset
def evaluate(dataloader):
"""Evaluate network on the specified dataset"""
total_L = 0.0
total_sample_num = 0
total_correct_num = 0
start_log_interval_time = time.time()
print('Begin Testing...')
for i, ((data, valid_length), label) in enumerate(data... |
Training process
def train():
"""Training process"""
start_pipeline_time = time.time()
# Training/Testing
best_valid_acc = 0
stop_early = 0
for epoch in range(args.epochs):
# Epoch training stats
start_epoch_time = time.time()
epoch_L = 0.0
epoch_sent_num = 0
... |
Forward logic
def hybrid_forward(self, F, data, valid_length): # pylint: disable=arguments-differ
"""Forward logic"""
# Data will have shape (T, N, C)
if self._use_mean_pool:
masked_encoded = F.SequenceMask(data,
sequence_length=valid_leng... |
r"""Hybrid forward computation for Long-Short Term Memory Projected network cell
with cell clip and projection clip.
Parameters
----------
inputs : input tensor with shape `(batch_size, input_size)`.
states : a list of two initial recurrent state tensors, with shape
... |
Rescales gradients of parameters so that the sum of their 2-norm is smaller than `max_norm`.
If gradients exist for more than one context for a parameter, user needs to explicitly call
``trainer.allreduce_grads`` so that the gradients are summed first before calculating
the 2-norm.
.. note::
T... |
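A hedged end-to-end sketch of that flow on a toy Gluon model, using gluonnlp.utils.clip_grad_global_norm (sizes and hyper-parameters are placeholders; on a single context trainer.step covers the allreduce):
import mxnet as mx
from mxnet import autograd, gluon
import gluonnlp as nlp

net = gluon.nn.Dense(2)
net.initialize()
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.1})
loss_fn = gluon.loss.SoftmaxCrossEntropyLoss()

x = mx.nd.random.normal(shape=(4, 8))
y = mx.nd.array([0, 1, 0, 1])
with autograd.record():
    loss = loss_fn(net(x), y)
loss.backward()

# with multiple contexts, call trainer.allreduce_grads() before clipping
# and trainer.update() afterwards, per the note above
params = [p for p in net.collect_params().values() if p.grad_req != 'null']
nlp.utils.clip_grad_global_norm(params, max_norm=0.25)
trainer.step(4)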
Training function.
def train(data_train, model, nsp_loss, mlm_loss, vocab_size, ctx, store):
"""Training function."""
mlm_metric = nlp.metric.MaskedAccuracy()
nsp_metric = nlp.metric.MaskedAccuracy()
mlm_metric.reset()
nsp_metric.reset()
lr = args.lr
optim_params = {'learning_rate': lr, 'e... |
forward backward implementation
def forward_backward(self, x):
"""forward backward implementation"""
with mx.autograd.record():
(ls, next_sentence_label, classified, masked_id, decoded, \
masked_weight, ls1, ls2, valid_length) = forward(x, self._model, self._mlm_loss,
... |
Print statistical information via the provided logger
Parameters
----------
logger : logging.Logger
logger created using logging.getLogger()
def log_info(self, logger):
"""Print statistical information via the provided logger
Parameters
----------
l... |
Read pre-trained embedding file for extending vocabulary
Parameters
----------
pret_embeddings : tuple
(embedding_name, source), used for gluonnlp.embedding.create(embedding_name, source)
def _add_pret_words(self, pret_embeddings):
"""Read pre-trained embedding file for ext... |
Read pre-trained embedding file
Parameters
----------
word_dims : int or None
vector size. Use `None` for auto-infer
Returns
-------
numpy.ndarray
T x C numpy NDArray
def get_pret_embs(self, word_dims=None):
"""Read pre-trained embedding ... |
Get randomly initialized embeddings when pre-trained embeddings are used, otherwise zero vectors
Parameters
----------
word_dims : int
word vector size
Returns
-------
numpy.ndarray
T x C numpy NDArray
def get_word_embs(self, word_dims):
... |
Randomly initialize embeddings for tag
Parameters
----------
tag_dims : int
tag vector size
Returns
-------
numpy.ndarray
random embeddings
def get_tag_embs(self, tag_dims):
"""Randomly initialize embeddings for tag
Parameters
... |
Map word(s) to its id(s)
Parameters
----------
xs : str or list
word or a list of words
Returns
-------
int or list
id or a list of ids
def word2id(self, xs):
"""Map word(s) to its id(s)
Parameters
----------
xs ... |
Map id(s) to word(s)
Parameters
----------
xs : int
id or a list of ids
Returns
-------
str or list
word or a list of words
def id2word(self, xs):
"""Map id(s) to word(s)
Parameters
----------
xs : int
... |
Map relation(s) to id(s)
Parameters
----------
xs : str or list
relation
Returns
-------
int or list
id(s) of relation
def rel2id(self, xs):
"""Map relation(s) to id(s)
Parameters
----------
xs : str or list
... |
Map id(s) to relation(s)
Parameters
----------
xs : int
id or a list of ids
Returns
-------
str or list
relation or a list of relations
def id2rel(self, xs):
"""Map id(s) to relation(s)
Parameters
----------
xs :... |
Map tag(s) to id(s)
Parameters
----------
xs : str or list
tag or tags
Returns
-------
int or list
id(s) of tag(s)
def tag2id(self, xs):
"""Map tag(s) to id(s)
Parameters
----------
xs : str or list
t... |
Indices of sentences when enumerating data set from batches.
Useful when retrieving the correct order of sentences
Returns
-------
list
List of ids ranging from 0 to #sent -1
def idx_sequence(self):
"""Indices of sentences when enumerating data set from batches.
... |
Get batch iterator
Parameters
----------
batch_size : int
size of one batch
shuffle : bool
whether to shuffle batches. Don't set to True when evaluating on dev or test set.
Returns
-------
tuple
word_inputs, tag_inputs, arc_tar... |
Extract a set of n-grams from a list of integers.
>>> create_ngram_set([1, 4, 9, 4, 1, 4], ngram_value=2)
{(4, 9), (4, 1), (1, 4), (9, 4)}
>>> create_ngram_set([1, 4, 9, 4, 1, 4], ngram_value=3)
{(1, 4, 9), (4, 9, 4), (9, 4, 1), (4, 1, 4)}
def create_ngram_set(input_list, ngram_value=2):
"""
Ex... |
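The body is cut off; the complete function is a one-liner consistent with both doctests returning sets:
def create_ngram_set(input_list, ngram_value=2):
    """Extract a set of n-grams from a list of integers."""
    return set(zip(*[input_list[i:] for i in range(ngram_value)]))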
Augment the input list of list (sequences) by appending n-grams values.
Example: adding bi-gram
>>> sequences = [[1, 3, 4, 5], [1, 3, 7, 9, 2]]
>>> token_indice = {(1, 3): 1337, (9, 2): 42, (4, 5): 2017}
>>> add_ngram(sequences, token_indice, ngram_range=2)
[[1, 3, 4, 5, 1337, 2017], [1, 3, 7, 9, 2,... |
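The body is cut off; an implementation that reproduces the example above (append the index of every known n-gram of size 2..ngram_range):
def add_ngram(sequences, token_indice, ngram_range=2):
    """Augment sequences with the indices of their known n-grams."""
    new_sequences = []
    for seq in sequences:
        new_list = list(seq)
        for ngram_value in range(2, ngram_range + 1):
            for i in range(len(seq) - ngram_value + 1):
                ngram = tuple(seq[i:i + ngram_value])
                if ngram in token_indice:
                    new_list.append(token_indice[ngram])
        new_sequences.append(new_list)
    return new_sequences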
This function is used for evaluating accuracy of
a given data iterator. (Either Train/Test data)
It also takes in the loss function used.
def evaluate_accuracy(data_iterator, net, ctx, loss_fun, num_classes):
"""
This function is used for evaluating accuracy of
a given data iterator. (Either Train/T... |
Helper function to get training data
def read_input_data(filename):
"""Helper function to get training data"""
logging.info('Opening file %s for reading input', filename)
input_file = open(filename, 'r')
data = []
labels = []
for line in input_file:
tokens = line.split(',', 1)
l... |
Parse command line arguments.
def parse_args():
"""Parse command line arguments."""
parser = argparse.ArgumentParser(
description='Text Classification with FastText',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# Computation options
group = parser.add_argument_group('Comput... |
Create the mapping from label to numeric label
def get_label_mapping(train_labels):
"""
Create the mapping from label to numeric label
"""
sorted_labels = np.sort(np.unique(train_labels))
label_mapping = {}
for i, label in enumerate(sorted_labels):
label_mapping[label] = i
logging.i... |
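For example, with hypothetical string labels (a dict comprehension stands in for the loop above):
import numpy as np

train_labels = ['spam', 'ham', 'spam', 'eggs']
sorted_labels = np.sort(np.unique(train_labels))
label_mapping = {label: i for i, label in enumerate(sorted_labels)}
print(label_mapping)  # {'eggs': 0, 'ham': 1, 'spam': 2}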
This function takes a dataset and converts
it into sequences via multiprocessing
def convert_to_sequences(dataset, vocab):
"""This function takes a dataset and converts
it into sequences via multiprocessing
"""
start = time.time()
dataset_vocab = map(lambda x: (x, vocab), dataset)
with mp.P... |
Preprocess and prepare a dataset
def preprocess_dataset(dataset, labels):
""" Preprocess and prepare a dataset"""
start = time.time()
with mp.Pool() as pool:
# Each sample is processed in an asynchronous manner.
dataset = gluon.data.SimpleDataset(list(zip(dataset, labels)))
lengths ... |
Construct the DataLoader. Pad data, stack label and lengths
def get_dataloader(train_dataset, train_data_lengths,
test_dataset, batch_size):
""" Construct the DataLoader. Pad data, stack label and lengths"""
bucket_num, bucket_ratio = 20, 0.2
batchify_fn = gluonnlp.data.batchify.Tuple(
... |
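A hedged sketch of the batching named above (Pad the token ids, Stack the labels, FixedBucketSampler with the bucket_num/bucket_ratio settings); the toy dataset is a placeholder, and the original also stacks lengths:
import gluonnlp
from mxnet import gluon

train_dataset = gluon.data.SimpleDataset(
    [([1, 2, 3], 0), ([4, 5], 1), ([6, 7, 8, 9], 0)])
train_data_lengths = [3, 2, 4]

batchify_fn = gluonnlp.data.batchify.Tuple(
    gluonnlp.data.batchify.Pad(axis=0, pad_val=0),  # pad token ids
    gluonnlp.data.batchify.Stack(dtype='float32'))  # stack labels
batch_sampler = gluonnlp.data.sampler.FixedBucketSampler(
    train_data_lengths, batch_size=2, num_buckets=2, ratio=0.2, shuffle=True)
train_dataloader = gluon.data.DataLoader(
    train_dataset, batch_sampler=batch_sampler, batchify_fn=batchify_fn)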
Training function that orchestrates the Classification!
def train(args):
"""Training function that orchestrates the Classification! """
train_file = args.input
test_file = args.validation
ngram_range = args.ngrams
logging.info('Ngrams range for the training run : %s', ngram_range)
logging.info(... |
Generate the unnormalized score for the given the input sequences.
Parameters
----------
inputs : NDArray, shape (batch_size, seq_length)
Input words for the sequences.
token_types : NDArray, shape (batch_size, seq_length)
Token types for the sequences, used to i... |
Parameters
----------
pred : NDArray, shape (batch_size, seq_length, 2)
BERTSquad forward output.
label : list, length is 2, each shape is (batch_size,1)
label[0] is the starting position of the answer,
label[1] is the ending position of the answer.
R... |
Encode the input sequence.
Parameters
----------
inputs : NDArray
states : list of NDArrays or None, default None
valid_length : NDArray or None, default None
Returns
-------
outputs : list
Outputs of the encoder.
def encode(self, inputs, st... |
Decode given the input sequence.
Parameters
----------
inputs : NDArray
states : list of NDArrays
valid_length : NDArray or None, default None
Returns
-------
output : NDArray
The output of the decoder. Shape is (batch_size, length, tgt_word_... |
One step decoding of the translation model.
Parameters
----------
step_input : NDArray
Shape (batch_size,)
states : list of NDArrays
Returns
-------
step_output : NDArray
Shape (batch_size, C_out)
states : list
step_additi... |
Generate the prediction given the src_seq and tgt_seq.
This is used in training an NMT model.
Parameters
----------
src_seq : NDArray
tgt_seq : NDArray
src_valid_length : NDArray or None
tgt_valid_length : NDArray or None
Returns
-------
... |
Creates an instance of a subword function.
def create_subword_function(subword_function_name, **kwargs):
"""Creates an instance of a subword function."""
create_ = registry.get_create_func(SubwordFunction, 'token embedding')
return create_(subword_function_name, **kwargs) |
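A usage sketch; the registered name 'NGramHashes' and its keyword arguments follow the gluonnlp subword registry, but treat the exact values as placeholders:
import gluonnlp as nlp

subword_fn = nlp.vocab.create_subword_function(
    'NGramHashes', num_subwords=1000, ngrams=[3, 4, 5, 6])
print(subword_fn(['hello']))  # one list of subword hash indices per word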
Indexes unknown and reserved tokens.
def _index_special_tokens(self, unknown_token, special_tokens):
"""Indexes unknown and reserved tokens."""
self._idx_to_token = [unknown_token] if unknown_token else []
if not special_tokens:
self._reserved_tokens = None
else:
... |
Indexes keys of `counter`.
Indexes keys of `counter` according to frequency thresholds such as `max_size` and
`min_freq`.
def _index_counter_keys(self, counter, unknown_token, special_tokens, max_size,
min_freq):
"""Indexes keys of `counter`.
Indexes keys... |
Attaches one or more embeddings to the indexed text tokens.
Parameters
----------
embeddings : None or tuple of :class:`gluonnlp.embedding.TokenEmbedding` instances
The embedding to be attached to the indexed tokens. If a tuple of multiple embeddings
is provided, their... |
Converts token indices to tokens according to the vocabulary.
Parameters
----------
indices : int or list of ints
A source token index or token indices to be converted.
Returns
-------
str or list of strs
A token or a list of tokens according t... |
Serialize Vocab object to json string.
This method does not serialize the underlying embedding.
def to_json(self):
"""Serialize Vocab object to json string.
This method does not serialize the underlying embedding.
"""
if self._embedding:
warnings.warn('Serializatio... |
Deserialize Vocab object from json string.
Parameters
----------
json_str : str
Serialized json string of a Vocab object.
Returns
-------
Vocab
def from_json(cls, json_str):
"""Deserialize Vocab object from json string.
Parameters
... |
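A round-trip sketch pairing from_json with the to_json above:
import gluonnlp as nlp

counter = nlp.data.count_tokens(['hello', 'world', 'hello'])
vocab = nlp.Vocab(counter)
restored = nlp.Vocab.from_json(vocab.to_json())
assert restored['hello'] == vocab['hello']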
Training function.
def train(data_train, model, nsp_loss, mlm_loss, vocab_size, ctx):
"""Training function."""
hvd.broadcast_parameters(model.collect_params(), root_rank=0)
mlm_metric = nlp.metric.MaskedAccuracy()
nsp_metric = nlp.metric.MaskedAccuracy()
mlm_metric.reset()
nsp_metric.reset()
... |
Training function.
def train():
"""Training function."""
log.info('Loading Train data...')
if version_2:
train_data = SQuAD('train', version='2.0')
else:
train_data = SQuAD('train', version='1.1')
log.info('Number of records in Train data:{}'.format(len(train_data)))
train_data_... |
Evaluate the model on validation dataset.
def evaluate():
"""Evaluate the model on validation dataset.
"""
log.info('Loading dev data...')
if version_2:
dev_data = SQuAD('dev', version='2.0')
else:
dev_data = SQuAD('dev', version='1.1')
log.info('Number of records in dev data:{... |
Inner Implementation of the Pad batchify
Parameters
----------
arrs : list
pad_axis : int
pad_val : number
use_shared_mem : bool, default False
Returns
-------
ret : NDArray
original_length : NDArray
def _pad_arrs_to_max_length(arrs, pad_axis, pad_val, use_shared_mem, dtype):
... |
Train a deep biaffine dependency parser
Parameters
----------
train_file : str
path to training set
dev_file : str
path to dev set
test_file : str
path to test set
save_dir : str
a directory for saving model and related met... |
Load from disk
Parameters
----------
path : str
path to the directory which typically contains a config.pkl file and a model.bin file
Returns
-------
DepParser
parser itself
def load(self, path):
"""Load from disk
Parameters
... |
Run evaluation on test set
Parameters
----------
test_file : str
path to test set
save_dir : str
where to store intermediate results and log
logger : logging.logger
logger for printing results
num_buckets_test : int
number ... |
Parse raw sentence into ConllSentence
Parameters
----------
sentence : list
a list of (word, tag) tuples
Returns
-------
ConllSentence
ConllSentence object
def parse(self, sentence):
"""Parse raw sentence into ConllSentence
Para... |
Apply weight drop to the parameter of a block.
Parameters
----------
block : Block or HybridBlock
The block whose parameters will have weight-drop applied.
local_param_regex : str
The regex for parameter names used in the self.params.get(), such as 'weight'.
rate : float
Fract... |
create rnn cell given specs
Parameters
----------
mode : str
The type of RNN cell to use. Options are 'lstmpc', 'rnn_tanh', 'rnn_relu', 'lstm', 'gru'.
num_layers : int
The number of RNN cells in the encoder.
input_size : int
The initial input size of the RNN cell.
hid... |
create rnn layer given specs
def _get_rnn_layer(mode, num_layers, input_size, hidden_size, dropout, weight_dropout):
"""create rnn layer given specs"""
if mode == 'rnn_relu':
rnn_block = functools.partial(rnn.RNN, activation='relu')
elif mode == 'rnn_tanh':
rnn_block = functools.partial(rnn... |
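A hedged completion of the dispatch (the 'lstm'/'gru' branches and the weight-drop hook are assumptions patterned on gluonnlp.model.utils.apply_weight_drop, described earlier):
import functools
from mxnet.gluon import rnn
from gluonnlp.model.utils import apply_weight_drop

def _get_rnn_layer(mode, num_layers, input_size, hidden_size, dropout, weight_dropout):
    """create rnn layer given specs"""
    if mode == 'rnn_relu':
        rnn_block = functools.partial(rnn.RNN, activation='relu')
    elif mode == 'rnn_tanh':
        rnn_block = functools.partial(rnn.RNN, activation='tanh')
    elif mode == 'lstm':
        rnn_block = rnn.LSTM
    elif mode == 'gru':
        rnn_block = rnn.GRU
    block = rnn_block(hidden_size, num_layers, dropout=dropout,
                      input_size=input_size)
    if weight_dropout:
        # drop recurrent (h2h) weights, as apply_weight_drop above describes
        apply_weight_drop(block, '.*h2h_weight', rate=weight_dropout)
    return block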
Forward computation.
def hybrid_forward(self, F, x, sampled_values, label, w_all, b_all):
"""Forward computation."""
sampled_candidates, expected_count_sampled, expected_count_true = sampled_values
# (num_sampled, in_unit)
w_sampled = w_all.slice(begin=(0, 0), end=(self._num_sampled, No... |
Forward computation.
def hybrid_forward(self, F, x, sampled_values, label, weight, bias):
"""Forward computation."""
sampled_candidates, _, _ = sampled_values
# (batch_size,)
label = F.reshape(label, shape=(-1,))
# (num_sampled+batch_size,)
ids = F.concat(sampled_candida... |
Forward computation.
def forward(self, x, sampled_values, label):
"""Forward computation."""
sampled_candidates, _, _ = sampled_values
# (batch_size,)
label = label.reshape(shape=(-1,))
# (num_sampled+batch_size,)
ids = nd.concat(sampled_candidates, label, dim=0)
... |
Flatten the structure of a nested container to a list.
Parameters
----------
data : A single NDArray/Symbol or nested container with NDArrays/Symbol.
The nested container to be flattened.
flattened : list or None
The container that holds the flattened result.
Returns
-------
st... |
Reconstruct the flattened list back to (possibly) nested structure.
Parameters
----------
structure : An integer or a nested container with integers.
The extracted structure of the container of `data`.
flattened : list or None
The container that holds the flattened result.
Returns
... |
Tile all the states to have batch_size * beam_size on the batch axis.
Parameters
----------
data : A single NDArray/Symbol or nested container with NDArrays/Symbol
Each NDArray/Symbol should have shape (N, ...) when state_info is None,
or same as the layout in state_info when it's not None.... |
Parameters
----------
F
samples : NDArray or Symbol
The current samples generated by beam search. Shape (batch_size, beam_size, L)
valid_length : NDArray or Symbol
The current valid lengths of the samples
outputs: NDArray or Symbol
Decoder outp... |
Sample by beam search.
Parameters
----------
F
inputs : NDArray or Symbol
The initial input of the decoder. Shape is (batch_size,).
states : Object that contains NDArrays or Symbols
The initial states of the decoder.
Returns
-------
... |
Returns a copy of this parameter on one context. Must have been
initialized on this context before.
Parameters
----------
ctx : Context
Desired context.
Returns
-------
NDArray on ctx
def data(self, ctx=None):
"""Returns a copy of this parame... |
r"""ELMo 2-layer BiLSTM with 1024 hidden units, 128 projection size, 1 highway layer.
Parameters
----------
dataset_name : str or None, default None
The dataset name on which the pre-trained model is trained.
Options are 'gbw'.
pretrained : bool, default False
Whether to load th... |
Compute context insensitive token embeddings for ELMo representations.
Parameters
----------
inputs : NDArray
Shape (batch_size, sequence_length, max_character_per_token)
of character ids representing the current batch.
Returns
-------
token_embe... |
Parameters
----------
inputs : NDArray
Shape (batch_size, sequence_length, max_character_per_token)
of character ids representing the current batch.
states : (list of list of NDArray, list of list of NDArray)
The states. First tuple element is the forward laye... |
r"""3-layer LSTM language model with weight-drop, variational dropout, and tied weights.
Embedding size is 400, and hidden layer size is 1150.
Parameters
----------
dataset_name : str or None, default None
The dataset name on which the pre-trained model is trained.
Options are 'wikitex... |