Resets environments at given indices.
Subclasses should override _reset to do the actual reset if something other
than the default implementation is desired.
Args:
indices: Indices of environments to reset. If None, all envs are reset.
Returns:
Batch of initial observations of reset enviro... |
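A minimal sketch of the dispatch this docstring describes, assuming the batch size lives in `self.batch_size` and the overridable hook is `self._reset` (the hook name is taken from the docstring; the default body here is an assumption):

import numpy as np

def reset(self, indices=None):
  """Resets environments at the given indices; all of them when None."""
  if indices is None:
    indices = np.arange(self.batch_size)
  # Delegate the actual reset to the subclass-overridable hook.
  return self._reset(indices)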
Takes a step in all environments; shouldn't pre-process or record.
Subclasses should override this to do the actual step if something other
than the default implementation is desired.
Args:
actions: (np.ndarray) with first dimension equal to the batch size.
Returns:
a tuple of stacked raw... |
Takes a step in all environments.
Subclasses should override _step to do the actual step if something other
than the default implementation is desired.
Args:
actions: Batch of actions.
Returns:
(preprocessed_observations, processed_rewards, dones, infos).
def step(self, actions):
""... |
Data fields to store on disk and their decoders.
def example_reading_spec(self):
"""Data fields to store on disk and their decoders."""
# Subclasses can override and/or extend.
processed_reward_type = tf.float32
if self.is_processed_rewards_discrete:
processed_reward_type = tf.int64
data_f... |
A generator to yield single time-steps from a list of trajectories.
def _generate_time_steps(self, trajectory_list):
"""A generator to yield single time-steps from a list of trajectories."""
for single_trajectory in trajectory_list:
assert isinstance(single_trajectory, trajectory.Trajectory)
# Ski... |
Get lookup table for VQ bottleneck.
def init_vq_bottleneck(bottleneck_size, hidden_size):
"""Get lookup table for VQ bottleneck."""
means = tf.get_variable(
name="means",
shape=[bottleneck_size, hidden_size],
initializer=tf.uniform_unit_scaling_initializer())
ema_count = tf.get_variable(
... |
Find the nearest element in means to elements in x.
def vq_nearest_neighbor(x, hparams):
"""Find the nearest element in means to elements in x."""
bottleneck_size = 2**hparams.bottleneck_bits
means = hparams.means
x_norm_sq = tf.reduce_sum(tf.square(x), axis=-1, keepdims=True)
means_norm_sq = tf.reduce_sum(t... |
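The truncated code above precomputes the squared norms so it can expand the pairwise distance as ||x - m||^2 = ||x||^2 - 2 x.m + ||m||^2. A NumPy sketch of that computation (the TF version presumably finishes the same way):

import numpy as np

def nearest_codebook_indices(x, means):
  """Index of the nearest codebook row for each row of x."""
  x_norm_sq = np.sum(np.square(x), axis=-1, keepdims=True)          # [N, 1]
  means_norm_sq = np.sum(np.square(means), axis=-1, keepdims=True)  # [K, 1]
  # ||x - m||^2 without materializing the [N, K, D] difference tensor.
  dist_sq = x_norm_sq - 2.0 * np.dot(x, means.T) + means_norm_sq.T  # [N, K]
  return np.argmin(dist_sq, axis=-1)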
Simple vector quantized discrete bottleneck.
def vq_discrete_bottleneck(x, hparams):
"""Simple vector quantized discrete bottleneck."""
tf.logging.info("Using EMA with beta = {}".format(hparams.beta))
bottleneck_size = 2**hparams.bottleneck_bits
x_shape = common_layers.shape_list(x)
x = tf.reshape(x, [-1, hp... |
Simple undiscretization from vector quantized representation.
def vq_discrete_unbottleneck(x, hparams):
"""Simple undiscretization from vector quantized representation."""
x_shape = common_layers.shape_list(x)
bottleneck_size = 2**hparams.bottleneck_bits
means = hparams.means
x_flat = tf.reshape(x, [-1, bott... |
A stack of convolution blocks with residual connections.
def residual_conv(x, repeat, k, hparams, name, reuse=None):
"""A stack of convolution blocks with residual connections."""
with tf.variable_scope(name, reuse=reuse):
dilations_and_kernels = [((1, 1), k) for _ in range(3)]
for i in range(repeat):
... |
Decompression function.
def decompress_step(source, hparams, first_relu, name):
"""Decompression function."""
with tf.variable_scope(name):
shape = common_layers.shape_list(source)
multiplier = 2
kernel = (1, 1)
thicker = common_layers.conv_block(
source,
hparams.hidden_size * multi... |
Compress.
def compress(x, hparams, name):
"""Compress."""
with tf.variable_scope(name):
# Run compression by strided convs.
cur = x
k1 = (3, 1)
k2 = (2, 1)
cur = residual_conv(cur, hparams.num_compress_steps, k1, hparams, "rc")
for i in range(hparams.num_compress_steps):
cur = common_... |
Transformer preparations and encoder.
def encode(x, x_space, hparams, name):
"""Transformer preparations and encoder."""
with tf.variable_scope(name):
(encoder_input, encoder_self_attention_bias,
ed) = transformer.transformer_prepare_encoder(x, x_space, hparams)
encoder_input = tf.nn.dropout(encoder_i... |
Original Transformer decoder.
def decode_transformer(encoder_output, encoder_decoder_attention_bias, targets,
hparams, name):
"""Original Transformer decoder."""
with tf.variable_scope(name):
targets = common_layers.flatten4d3d(targets)
decoder_input, decoder_self_bias = (
t... |
Latent prediction and loss.
def get_latent_pred_loss(latents_pred, latents_discrete_hot, hparams):
"""Latent prediction and loss."""
latents_logits = tf.layers.dense(
latents_pred, 2**hparams.bottleneck_bits, name="extra_logits")
loss = tf.nn.softmax_cross_entropy_with_logits_v2(
labels=tf.stop_gradi... |
Main step used for training.
def ae_transformer_internal(inputs, targets, target_space, hparams, cache=None):
"""Main step used for training."""
# Encoder.
inputs = common_layers.flatten4d3d(inputs)
inputs, ed = encode(inputs, target_space, hparams, "input_enc")
# Autoencoding.
losses = {"extra": tf.const... |
Set of hyperparameters.
def transformer_nat_small():
"""Set of hyperparameters."""
hparams = transformer.transformer_small()
hparams.batch_size = 2048
hparams.learning_rate = 0.2
hparams.learning_rate_warmup_steps = 4000
hparams.num_hidden_layers = 3
hparams.hidden_size = 384
hparams.filter_size = 2048... |
Set of hyperparameters.
def transformer_nat_base():
"""Set of hyperparameters."""
hparams = transformer_nat_small()
hparams.batch_size = 2048
hparams.hidden_size = 512
hparams.filter_size = 4096
hparams.num_hidden_layers = 6
return hparams |
Set of hyperparameters.
def transformer_nat_big():
"""Set of hyperparameters."""
hparams = transformer_nat_small()
hparams.batch_size = 2048
hparams.hidden_size = 1024
hparams.filter_size = 4096
hparams.num_hidden_layers = 6
hparams.num_heads = 16
hparams.layer_prepostprocess_dropout = 0.3
return hpa... |
A policy net function.
def policy_net(rng_key,
batch_observations_shape,
num_actions,
bottom_layers=None):
"""A policy net function."""
# Use the bottom_layers as the bottom part of the network and just add the
# required layers on top of it.
if bottom_layers is Non... |
A value net function.
def value_net(rng_key,
batch_observations_shape,
num_actions,
bottom_layers=None):
"""A value net function."""
del num_actions
if bottom_layers is None:
bottom_layers = []
bottom_layers.extend([
layers.Dense(1),
])
net = layers.Seri... |
A policy and value net function.
def policy_and_value_net(rng_key,
batch_observations_shape,
num_actions,
bottom_layers=None):
"""A policy and value net function."""
# Layers.
cur_layers = []
if bottom_layers is not None:
cur_layer... |
Dumps the params with `logging.error`.
def log_params(params, name="params"):
"""Dumps the params with `logging.error`."""
for i, param in enumerate(params):
if not param:
# Empty tuple.
continue
if not isinstance(param, (list, tuple)):
logging.error(
"%s[%d] : (%s) = [%s]", nam... |
Collect trajectories with the given policy net and behaviour.
Args:
env: A gym env interface, for now this is not-batched.
policy_fun: callable mapping observations (B, T+1) to log-probabilities (B, T+1, A).
num_trajectories: int, number of trajectories.
policy: string, "greedy", "epsilon-greedy", or "categorical-samp... |
Returns the padding value given a dtype.
def get_padding_value(dtype):
"""Returns the padding value given a dtype."""
padding_value = None
if dtype == np.uint8:
padding_value = np.uint8(0)
elif dtype == np.uint16:
padding_value = np.uint16(0)
elif dtype == np.float32:
padding_value = 0.0
else:
... |
Pad trajectories to a bucket length that is a multiple of boundary.
Args:
trajectories: list[(observation, actions, rewards)], where each observation
is shaped (t+1,) + OBS and actions & rewards are shaped (t,), with the
length of the list being B (batch size).
boundary: int, bucket length, the a... |
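The bucketing rule is truncated above; a plausible reading, sketched here as an assumption, is that each trajectory of length t is padded to the smallest multiple of `boundary` at or above t (observations to one step more, matching the (t+1,) + OBS shape):

import numpy as np

def bucket_length(t, boundary):
  """Smallest multiple of `boundary` that is >= t."""
  return boundary * int(np.ceil(float(t) / boundary))

# E.g. with boundary=20, trajectories of lengths 7, 20 and 33 are padded
# to 20, 20 and 40 respectively; observations get one extra time-step.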
r"""Computes rewards to go.
Reward-to-go is defined as the discounted reward that we have yet to
collect, going forward from this point, i.e.:
r2g_t = \sum_{l=0}^{\infty} (\gamma^{l} * reward_{t+l})
Args:
rewards: np.ndarray of shape (B, T) of rewards.
mask: np.ndarray of shape (B, T) of mas... |
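A NumPy sketch of the r2g recurrence above, computed as a reverse scan; masking each reward before accumulating is an assumption about the padding convention:

import numpy as np

def rewards_to_go(rewards, mask, gamma):
  """Discounted suffix sums: r2g_t = r_t + gamma * r2g_{t+1}."""
  batch_size, time_steps = rewards.shape
  r2g = np.zeros((batch_size, time_steps))
  acc = np.zeros(batch_size)
  for t in reversed(range(time_steps)):
    acc = rewards[:, t] * mask[:, t] + gamma * acc
    r2g[:, t] = acc
  return r2g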
Computes the value loss.
Args:
value_net_apply: value net apply function with signature (params, ndarray of
shape (B, T+1) + OBS) -> ndarray(B, T+1, 1)
value_net_params: params of value_net_apply.
observations: np.ndarray of shape (B, T+1) + OBS
rewards: np.ndarray of shape (B, T) of rewards.
... |
Computes the value loss given the prediction of the value function.
Args:
value_prediction: np.ndarray of shape (B, T+1, 1)
rewards: np.ndarray of shape (B, T) of rewards.
reward_mask: np.ndarray of shape (B, T), the mask over rewards.
gamma: float, discount factor.
Returns:
The average L2 val... |
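A sketch of the loss this docstring describes: squeeze the value head, drop the prediction for the extra final observation, and take a masked L2 distance to the rewards-to-go. Normalizing by the mask sum is an assumption:

import numpy as np

def value_loss_given_predictions(value_prediction, rewards, reward_mask,
                                 gamma=0.99):
  _, time_steps = rewards.shape
  # (B, T+1, 1) -> (B, T): drop the value of the final observation.
  values = np.squeeze(value_prediction, axis=2)[:, :time_steps] * reward_mask
  r2g = rewards_to_go(rewards, reward_mask, gamma)  # sketched earlier
  return np.sum(np.square(values - r2g)) / np.sum(reward_mask)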
r"""Computes TD-residuals from V(s) and rewards.
A `delta`, i.e. a TD-residual, is defined as:
delta_{b,t} = r_{b,t} + \gamma * v_{b,t+1} - v_{b,t}.
Args:
predicted_values: ndarray of shape (B, T+1). NOTE: Expects axis 2 to have
been squeezed. These represent V(s_bt) for b < B and t < T+1.
rewards: nd... |
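A direct NumPy transcription of the delta formula above; zeroing the residuals at padded time-steps is an assumption:

def deltas(predicted_values, rewards, mask, gamma):
  """delta_{b,t} = r_{b,t} + gamma * v_{b,t+1} - v_{b,t}, masked."""
  # predicted_values: (B, T+1); rewards and mask: (B, T).
  return (rewards + gamma * predicted_values[:, 1:]
          - predicted_values[:, :-1]) * mask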
r"""Computes the GAE advantages given the one step TD-residuals.
The formula for a GAE advantage estimator is as follows:
A_{bt} = \sum_{l=0}^{\infty}(\gamma * \lambda)^{l}(\delta_{b,t+l}).
Internally we just call rewards_to_go, since it is the same computation.
Args:
td_deltas: np.ndarray of shape (B, ... |
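Since the GAE sum is exactly a discounted suffix sum of the deltas, the sketch reduces to the rewards_to_go sketch above with discount gamma * lambda, as the docstring notes:

def gae_advantages(td_deltas, mask, lambda_, gamma):
  """A_{bt} as a (gamma * lambda)-discounted suffix sum of the deltas."""
  return rewards_to_go(td_deltas, mask, lambda_ * gamma)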
Picks out the log-probabilities of the actions along batch and time-steps.
Args:
probab_observations: ndarray of shape `[B, T+1, A]`, where
probab_observations[b, t, i] contains the log-probability of action = i at
the t^th time-step in the b^th trajectory.
actions: ndarray of shape `[B, T]`, with ea... |
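A sketch of the gather this docstring describes, using explicit index arrays (NumPy advanced indexing); the helper name is hypothetical, and only the first T of the T+1 time-steps are selected:

import numpy as np

def chosen_probabs(probab_observations, actions):
  """probab_observations[b, t, actions[b, t]] for all b < B, t < T."""
  batch_size, time_steps = actions.shape
  return probab_observations[np.arange(batch_size)[:, None],
                             np.arange(time_steps)[None, :],
                             actions]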
Computes the probability ratios for each time-step in a trajectory.
Args:
p_new: ndarray of shape [B, T+1, A] of the log-probabilities that the policy
network assigns to all the actions at each time-step in each batch using
the new parameters.
p_old: ndarray of shape [B, T+1, A], same as above, b... |
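Given the gather sketched above, the ratios follow: since both inputs are log-probabilities, the ratio is an exponentiated difference, which is more stable than dividing raw probabilities. Zeroing padded steps with the reward mask is an assumption:

import numpy as np

def compute_probab_ratios(p_new, p_old, actions, reward_mask):
  """pi_new(a_t | s_t) / pi_old(a_t | s_t) for each (b, t)."""
  logp_new = chosen_probabs(p_new, actions)  # [B, T]
  logp_old = chosen_probabs(p_old, actions)  # [B, T]
  return np.exp(logp_new - logp_old) * reward_mask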
PPO objective, negated so it can be minimized as a loss, given observations.
def ppo_loss(policy_net_apply,
new_policy_params,
old_policy_params,
value_net_apply,
value_net_params,
padded_observations,
padded_actions,
padded_rewards,
... |
PPO objective, negated so it can be minimized as a loss, given predictions.
def ppo_loss_given_predictions(log_probab_actions_new,
log_probab_actions_old,
predicted_values,
padded_actions,
padded_rewards,
... |
Computes the combined (clipped loss + value loss) given predictions.
def combined_loss_given_predictions(log_probab_actions_new,
log_probab_actions_old,
value_prediction,
padded_actions,
... |
Computes the combined (clipped loss + value loss) given observations.
def combined_loss(new_params,
old_params,
policy_and_value_net_apply,
padded_observations,
padded_actions,
padded_rewards,
reward_mask,
... |
PPO optimizer step.
def ppo_opt_step(i,
opt_state,
ppo_opt_update,
policy_net_apply,
old_policy_params,
value_net_apply,
value_net_params,
padded_observations,
padded_actions,
... |
Value optimizer step.
def value_opt_step(i,
opt_state,
opt_update,
value_net_apply,
padded_observations,
padded_rewards,
reward_mask,
gamma=0.99):
"""Value optimizer step."""
value_p... |
Policy and Value optimizer step.
def policy_and_value_opt_step(i,
opt_state,
opt_update,
policy_and_value_net_apply,
old_params,
padded_observations,
... |
Runs the training loop for PPO, with fixed policy and value nets.
def training_loop(env=None,
env_name="CartPole-v0",
epochs=EPOCHS,
policy_net_fun=None,
value_net_fun=None,
policy_and_value_net_fun=None,
policy... |
Download corpora for multinli.
Args:
tmp_dir: a string
Returns:
a string
def _maybe_download_corpora(tmp_dir):
"""Download corpora for multinli.
Args:
tmp_dir: a string
Returns:
a string
"""
mnli_filename = "MNLI.zip"
mnli_finalpath = os.path.join(tmp_dir, "MNLI")
if not tf.gfile.Ex... |
Generate mnli examples.
Args:
filename: a string
Yields:
dictionaries containing "premise", "hypothesis" and "label" strings
def _example_generator(filename):
"""Generate mnli examples.
Args:
filename: a string
Yields:
dictionaries containing "premise", "hypothesis" and "label" strings
""... |
Adds a residual connection to the filter x for the shake-shake model.
def shake_shake_skip_connection(x, output_filters, stride, is_training):
"""Adds a residual connection to the filter x for the shake-shake model."""
curr_filters = common_layers.shape_list(x)[-1]
if curr_filters == output_filters:
return x... |
Builds one branch of a two-branch convnet.
def shake_shake_branch(x, output_filters, stride, rand_forward, rand_backward,
hparams):
"""Building a 2 branching convnet."""
is_training = hparams.mode == tf.estimator.ModeKeys.TRAIN
x = tf.nn.relu(x)
x = tf.layers.conv2d(
x,
output_filters, ... |
Builds a full shake-shake sub layer.
def shake_shake_block(x, output_filters, stride, hparams):
"""Builds a full shake-shake sub layer."""
is_training = hparams.mode == tf.estimator.ModeKeys.TRAIN
batch_size = common_layers.shape_list(x)[0]
# Generate random numbers for scaling the branches.
rand_forward = ... |
Builds many sub layers into one full layer.
def shake_shake_layer(x, output_filters, num_blocks, stride, hparams):
"""Builds many sub layers into one full layer."""
for block_num in range(num_blocks):
curr_stride = stride if (block_num == 0) else 1
with tf.variable_scope("layer_{}".format(block_num)):
... |
Parameters for CIFAR-10. Gets to about 96% accuracy@700K steps, 1 GPU.
def shakeshake_small():
"""Parameters for CIFAR-10. Gets to about 96% accuracy@700K steps, 1 GPU."""
hparams = common_hparams.basic_params1()
hparams.batch_size = 128
hparams.hidden_size = 32
hparams.layer_prepostprocess_dropout = 0.0
h... |
Check if metric has plateaued.
A metric has plateaued if the value has not increased/decreased (depending on
`decrease`) by `delta` for at least `num_steps`.
Args:
steps: list<int> list of global steps for values.
values: list<float> list of metric values.
num_steps: int, number of steps the metric ... |
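A sketch of the check as described, assuming `num_steps` is measured in global steps and `decrease` flags a metric that should be going down (the exact windowing is truncated above):

def has_metric_plateaued(steps, values, num_steps, delta, decrease=False):
  if not steps or steps[-1] - steps[0] < num_steps:
    return False  # not enough history to decide yet
  # Values recorded within the trailing window of num_steps global steps.
  window = [v for s, v in zip(steps, values) if s >= steps[-1] - num_steps]
  change = window[0] - min(window) if decrease else max(window) - window[0]
  return change < delta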
SAVP model hparams.
def next_frame_savp():
"""SAVP model hparams."""
hparams = sv2p_params.next_frame_sv2p()
hparams.add_hparam("z_dim", 8)
hparams.add_hparam("num_discriminator_filters", 32)
hparams.add_hparam("use_vae", True)
hparams.add_hparam("use_gan", False)
hparams.add_hparam("use_spectral_norm", ... |
SAVP - VAE only model.
def next_frame_savp_vae():
"""SAVP - VAE only model."""
hparams = next_frame_savp()
hparams.use_vae = True
hparams.use_gan = False
hparams.latent_loss_multiplier = 1e-3
hparams.latent_loss_multiplier_schedule = "linear_anneal"
return hparams |
SAVP - GAN only model.
def next_frame_savp_gan():
"""SAVP - GAN only model."""
hparams = next_frame_savp()
hparams.use_gan = True
hparams.use_vae = False
hparams.gan_loss_multiplier = 0.001
hparams.optimizer_adam_beta1 = 0.5
hparams.learning_rate_constant = 2e-4
hparams.gan_loss = "cross_entropy"
hpa... |
Default hyperparameters for a DietAdamOptimizer.
Returns:
a hyperparameters object.
def diet_adam_optimizer_params():
"""Default hyperparameters for a DietAdamOptimizer.
Returns:
a hyperparameters object.
"""
return hparam.HParams(
quantize=True, # use 16-bit fixed-point
quantization_s... |
A two-layer feed-forward network with relu activation on hidden layer.
Uses diet variables.
Recomputes hidden layer on backprop to save activation memory.
Args:
x: a Tensor with shape [batch, io_size]
hidden_size: an integer
params: a diet variable HParams object.
Returns:
a Tensor with shape... |
Quantize x according to params, optionally randomizing the rounding.
def _quantize(x, params, randomize=True):
"""Quantize x according to params, optionally randomizing the rounding."""
if not params.quantize:
return x
if not randomize:
return tf.bitcast(
tf.cast(x / params.quantization_scale, t... |
Dequantize q according to params.
def _dequantize(q, params):
"""Dequantize q according to params."""
if not params.quantize:
return q
return tf.to_float(tf.bitcast(q, tf.int16)) * params.quantization_scale |
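`_dequantize` above is shown in full: it reinterprets the stored 16 bits as an int16 fixed-point code and scales back to float. A NumPy sketch of the round trip, leaving out the bitcast into a float-typed tensor (which in the truncated `_quantize` is presumably just a storage trick); the scale value is hypothetical:

import numpy as np

scale = 10.0 / 2**15                 # hypothetical quantization_scale
x = np.float32(0.12345)
q = np.int16(np.round(x / scale))    # 16-bit fixed-point code
x_hat = np.float32(q) * scale        # dequantized value
# The round-trip error |x - x_hat| is at most scale / 2 (~1.5e-4 here).

# The randomize=True path dithers before truncating, so the rounding
# error is zero-mean in expectation (stochastic rounding).
q_rand = np.int16(np.floor(x / scale + np.random.uniform()))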
Create a custom variable getter for diet variables according to params.
def make_diet_var_getter(params):
"""Create a custom variable getter for diet variables according to params."""
def diet_var_initializer(shape, dtype, partition_info=None):
"""Initializer for a diet variable."""
del dtype
del part... |
Call function with args; use diet variables according to params.
def _fn_with_diet_vars(fn, args, params):
"""Call function with args; use diet variables according to params."""
vs_ctr = []
def grad_fn(inputs, variables, outputs, output_grads):
"""Custom gradient function."""
del outputs # recomputing... |
Decorator for graph-building function to use diet variables.
def fn_with_diet_vars(params):
"""Decorator for graph-building function to use diet variables."""
params = copy.copy(params)
def dec(fn):
def wrapped(*args):
return _fn_with_diet_vars(fn, args, params)
return wrapped
return dec |
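A hedged usage sketch of the decorator above: wrap a graph-building function so its variables are created as diet variables. The wrapped function's body and shapes are illustrative only:

@fn_with_diet_vars(diet_adam_optimizer_params())
def feed_forward(x):
  # Variables created inside are diet variables per the params.
  return tf.layers.dense(x, 256, activation=tf.nn.relu)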
Create the factorized Adam accumulators for diet variables.
def create_slots(self, var):
"""Create the factorized Adam accumulators for diet variables."""
params = self.params
shape = var.get_shape().as_list()
if not hasattr(params, "slots"):
params.slots = defaultdict(dict)
name = var.op.n... |
Update the variable and its slots.
def update_variable(self, var, grad_var):
"""Update the variable and its slots."""
params = self.params
global_step = tf.to_float(self.global_step) + 1
# compute learning rate
lrate = params.learning_rate
if params.learning_rate_decay_scheme == "noam":
... |
Construct EstimatorSpec for EVAL mode.
def estimator_spec_eval(
self, features, logits, labels, loss, restore_hook, use_tpu):
"""Construct EstimatorSpec for EVAL mode."""
hparams = self.hparams
problem = hparams.problem
if logits.get_shape().ndims == 3:
logits = tf.expand_dims(tf.expand_dim... |
Generator for the dataset samples.
If not present, download and extract the dataset.
Args:
tmp_dir: path to the directory where to download the dataset.
pb_cst: CodingPbConstants object defining paths
Yields:
A CodingPbInfo object containing information for the next challenge.
def generator_samples(t... |
Adds a stack of LSTM layers on top of input.
Args:
inputs: The input `Tensor`, shaped `[batch_size, time_steps, hidden_size]`.
sequence_length: Lengths of the actual input sequence, excluding padding; a
`Tensor` shaped `[batch_size]`.
hparams: HParams; hyperparameters.
train: bool; `True` whe... |
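A minimal sketch of the stacked-LSTM call this docstring describes, in the TF 1.x API the surrounding code uses; the cell type and the absence of dropout wiring are assumptions:

def lstm(inputs, sequence_length, hparams, train, name):
  """Stacks hparams.num_hidden_layers LSTM cells and runs them over time."""
  cells = [tf.nn.rnn_cell.BasicLSTMCell(hparams.hidden_size)
           for _ in range(hparams.num_hidden_layers)]
  with tf.variable_scope(name):
    return tf.nn.dynamic_rnn(
        tf.nn.rnn_cell.MultiRNNCell(cells),
        inputs,
        sequence_length=sequence_length,
        dtype=tf.float32)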
Run LSTM cell with attention on inputs of shape [batch x time x size].
Args:
inputs: The decoder input `Tensor`, shaped `[batch_size, decoder_steps,
hidden_size]`.
hparams: HParams; hyperparameters.
train: bool; `True` when constructing training graph to enable dropout.
name: string; Create v... |
The basic LSTM seq2seq model, main step used for training.
def lstm_seq2seq_internal(inputs, targets, hparams, train):
"""The basic LSTM seq2seq model, main step used for training."""
with tf.variable_scope("lstm_seq2seq"):
if inputs is not None:
inputs_length = common_layers.length_from_embedding(inputs... |
LSTM seq2seq model with attention, main step used for training.
def lstm_seq2seq_internal_attention(inputs, targets, hparams, train,
inputs_length, targets_length):
"""LSTM seq2seq model with attention, main step used for training."""
with tf.variable_scope("lstm_seq2seq_attenti... |
Bidirectional LSTM for encoding inputs that are [batch x time x size].
def lstm_bid_encoder(inputs, sequence_length, hparams, train, name):
"""Bidirectional LSTM for encoding inputs that are [batch x time x size]."""
with tf.variable_scope(name):
cell_fw = tf.nn.rnn_cell.MultiRNNCell(
[_dropout_lstm_c... |
The basic LSTM seq2seq model with bidirectional encoder.
def lstm_seq2seq_internal_bid_encoder(inputs, targets, hparams, train):
"""The basic LSTM seq2seq model with bidirectional encoder."""
with tf.variable_scope("lstm_seq2seq_bid_encoder"):
if inputs is not None:
inputs_length = common_layers.length_f... |
LSTM seq2seq model with attention and bidirectional encoder, main step used for training.
def lstm_seq2seq_internal_attention_bid_encoder(inputs, targets, hparams,
train):
"""LSTM seq2seq model with attention, main step used for training."""
with tf.variable_scope("lstm_seq2seq_attention_bid_... |
HParams for LSTM.
def lstm_seq2seq():
"""HParams for LSTM."""
hparams = common_hparams.basic_params1()
hparams.daisy_chain_variables = False
hparams.batch_size = 1024
hparams.hidden_size = 128
hparams.num_hidden_layers = 2
hparams.initializer = "uniform_unit_scaling"
hparams.initializer_gain = 1.0
hp... |
Base attention params.
def lstm_attention_base():
"""Base attention params."""
hparams = lstm_seq2seq()
hparams.add_hparam("attention_layer_size", hparams.hidden_size)
hparams.add_hparam("output_attention", True)
hparams.add_hparam("num_heads", 1)
return hparams |
Basic LSTM Params.
def lstm_asr_v1():
"""Basic LSTM Params."""
hparams = lstm_bahdanau_attention()
hparams.num_hidden_layers = 2
hparams.hidden_size = 256
hparams.batch_size = 36
hparams.max_input_seq_length = 600000
hparams.max_target_seq_length = 350
hparams.max_length = hparams.max_input_seq_length
... |
Hparams for LSTM with area attention.
def lstm_area_attention_base():
"""Hparams for LSTM with area attention."""
hparams = lstm_luong_attention()
hparams.batch_size = 16384
hparams.num_hidden_layers = 2
hparams.hidden_size = 1024
hparams.num_heads = 4
hparams.dropout = 0.2
hparams.learning_rate = 0.1
... |
Create a run config.
Args:
hp: model hyperparameters
Returns:
a run config
def create_surrogate_run_config(hp):
"""Create a run config.
Args:
hp: model hyperparameters
Returns:
a run config
"""
save_ckpt_steps = max(FLAGS.iterations_per_loop, FLAGS.local_eval_frequency)
save_ckpt_secs... |
Construct input pipeline.
def prepare_data(problem, hparams, params, config):
"""Construct input pipeline."""
input_fn = problem.make_estimator_input_fn(
tf.estimator.ModeKeys.EVAL, hparams, force_repeat=True)
dataset = input_fn(params, config)
features, _ = dataset.make_one_shot_iterator().get_next()
... |
Transform a string with a filename into a list of float32.
Args:
s: path to the file with a waveform.
Returns:
samples: list of float32s
def encode(self, s):
"""Transform a string with a filename into a list of float32.
Args:
s: path to the file with a waveform.
Returns:
s... |
Transform a sequence of float32 into a waveform.
Args:
ids: list of integers to be converted.
Returns:
Path to the temporary file where the waveform was saved.
Raises:
ValueError: if the ids are not of the appropriate size.
def decode(self, ids):
"""Transform a sequence of float32 ... |
Creates and returns a new vertex.
Returns:
A new Vertex instance with a unique index.
def new_vertex(self):
"""Creates and returns a new vertex.
Returns:
A new Vertex instance with a unique index.
"""
vertex = Vertex(len(self.vertices))
self.vertices.append(vertex)
return vert... |
Returns or creates a Vertex mapped by key.
Args:
key: A string reference for a vertex. May refer to a new Vertex in which
case it will be created.
Returns:
The Vertex mapped to by key.
def get_vertex(self, key):
"""Returns or Creates a Vertex mapped by key.
Args:
key: A st... |
Returns a new edge connecting source and target vertices.
Args:
source: The source Vertex.
target: The target Vertex.
Returns:
A new Edge linking source to target.
def add_edge(self, source, target):
"""Returns a new edge connecting source and target vertices.
Args:
source: T... |
Returns a simplified dictionary representing the Graph.
Returns:
A dictionary that can easily be serialized to JSON.
def to_dict(self):
"""Returns a simplified dictionary representing the Graph.
Returns:
A dictionary that can easily be serialized to JSON.
"""
return {
"node": ... |
Self-attention layer with source as memory antecedent.
def attend(x, source, hparams, name):
"""Self-attention layer with source as memory antecedent."""
with tf.variable_scope(name):
x = tf.squeeze(x, axis=2)
if len(source.get_shape()) > 3:
source = tf.squeeze(source, axis=2)
source = common_att... |
Calculate softmax(x), select top-k and rescale to sum to 1.
def top_k_softmax(x, k):
"""Calculate softmax(x), select top-k and rescale to sum to 1."""
x = tf.nn.softmax(x)
top_x, _ = tf.nn.top_k(x, k=k+1)
min_top = tf.reduce_min(top_x, axis=-1, keepdims=True)
x = tf.nn.relu((x - min_top) + 1e-12)
x /= tf.r... |
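A NumPy transcription of the visible logic: subtracting the (k+1)-th largest probability and clipping at zero keeps only the top-k entries, and the final division rescales them to sum to 1:

import numpy as np

def top_k_softmax_np(x, k):
  p = np.exp(x - np.max(x, axis=-1, keepdims=True))
  p /= np.sum(p, axis=-1, keepdims=True)            # softmax(x)
  min_top = np.sort(p, axis=-1)[..., -(k + 1)][..., None]
  p = np.maximum(p - min_top + 1e-12, 0.0)          # zero all but top-k
  return p / np.sum(p, axis=-1, keepdims=True)      # rescale to sum to 1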
Compress.
def compress(x, c, is_2d, hparams, name):
"""Compress."""
with tf.variable_scope(name):
# Run compression by strided convs.
cur = x
k1 = (3, 3) if is_2d else (3, 1)
k2 = (2, 2) if is_2d else (2, 1)
cur = residual_conv(cur, hparams.num_compress_steps, k1, hparams, "rc")
if c is not... |
Original Transformer decoder.
def decode_transformer(encoder_output,
encoder_decoder_attention_bias,
targets,
hparams,
name,
task=None,
causal=True):
"""Original Transformer decod... |
Latent prediction and loss.
def ae_latent_softmax(latents_pred, latents_discrete, hparams):
"""Latent prediction and loss."""
vocab_size = 2 ** hparams.z_size
if hparams.num_decode_blocks < 2:
latents_logits = tf.layers.dense(latents_pred, vocab_size,
name="extra_logits")... |
Sample from the latent space in the autoencoder.
def ae_latent_sample(latents_dense, inputs, ed, embed, iters, hparams):
"""Sample from the latent space in the autoencoder."""
if hparams.num_decode_blocks < 2 and hparams.sampling_temp == 0.0:
# TODO(lukaszkaiser): beam-search only works in non-blocked mode for... |
AE Transformer, main step used for training.
def ae_transformer_internal(inputs,
targets,
target_space,
hparams,
cache=None,
predict_mask=1.0):
"""AE Transformer, main step used... |
Set of hyperparameters.
def transformer_ae_small():
"""Set of hyperparameters."""
hparams = transformer.transformer_small()
hparams.batch_size = 2048
hparams.learning_rate = 0.2
hparams.learning_rate_warmup_steps = 4000
hparams.num_hidden_layers = 3
hparams.hidden_size = 384
hparams.filter_size = 2048
... |
Hyperparameters for CIFAR-10 experiments.
def imagetransformer_ae_cifar():
"""Hyperparameters for CIFAR-10 experiments."""
hparams = transformer_ae_small()
hparams.filter_size = 512
hparams.num_compress_steps = 3
hparams.startup_steps = 10000
hparams.is_2d = 0
hparams.learning_rate_warmup_steps = 8000
... |
For 64x64 ImageNet. ~56M trainable variables.
def imagetransformer_ae_imagenet():
"""For 64x64 ImageNet. ~56M trainable variables."""
hparams = imagetransformer_ae_cifar()
hparams.max_length = int(64 * 64 * 3)
hparams.img_len = 64
hparams.num_heads = 4 # Heads are expensive on TPUs.
# Reduce architecture ... |
Set of hyperparameters.
def transformer_ae_base():
"""Set of hyperparameters."""
hparams = transformer_ae_small()
hparams.batch_size = 2048
hparams.hidden_size = 512
hparams.filter_size = 4096
hparams.num_hidden_layers = 6
return hparams |
Set of hyperparameters.
def transformer_ae_a3():
"""Set of hyperparameters."""
hparams = transformer_ae_base()
hparams.batch_size = 4096
hparams.layer_prepostprocess_dropout = 0.3
hparams.optimizer = "Adafactor"
hparams.learning_rate = 0.25
hparams.learning_rate_warmup_steps = 10000
return hparams |
Set of hyperparameters.
def transformer_ae_base_noatt():
"""Set of hyperparameters."""
hparams = transformer_ae_base()
hparams.reshape_method = "slice"
hparams.bottleneck_kind = "dvq"
hparams.hidden_size = 512
hparams.num_blocks = 1
hparams.num_decode_blocks = 1
hparams.z_size = 12
hparams.do_attend_... |
Set of hyperparameters.
def transformer_ae_small_noatt():
"""Set of hyperparameters."""
hparams = transformer_ae_small()
hparams.reshape_method = "slice"
hparams.bottleneck_kind = "dvq"
hparams.hidden_size = 512
hparams.num_blocks = 1
hparams.num_decode_blocks = 1
hparams.z_size = 12
hparams.do_atten... |
Basic transformer_sketch hparams.
def transformer_sketch():
"""Basic transformer_sketch hparams."""
hparams = transformer.transformer_small()
hparams.num_compress_steps = 4
hparams.batch_size = 32
hparams.clip_grad_norm = 2.
hparams.sampling_method = "random"
return hparams |
Get the layers module, compatible with both TF 1 and TF 2 for now.
def layers():
"""Get the layers module, compatible with both TF 1 and TF 2 for now."""
global _cached_layers
if _cached_layers is not None:
return _cached_layers
layers_module = tf.layers
try:
from tensorflow.python import tf2 # pylint: disable=g-di... |
Like tf.nn.dropout but takes broadcast_dims instead of noise_shape.
Instead of specifying noise_shape, this function takes broadcast_dims -
a list of dimension numbers in which noise_shape should be 1. The random
keep/drop tensor has dimensionality 1 along these dimensions.
Args:
x: a floating point tens... |
Saturating sigmoid: 1.2 * sigmoid(x) - 0.1 cut to [0, 1].
def saturating_sigmoid(x):
"""Saturating sigmoid: 1.2 * sigmoid(x) - 0.1 cut to [0, 1]."""
with tf.name_scope("saturating_sigmoid", values=[x]):
y = tf.sigmoid(x)
return tf.minimum(1.0, tf.maximum(0.0, 1.2 * y - 0.1)) |
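Unlike a plain sigmoid, the rescaled-and-clipped version reaches exactly 0 and 1 at finite inputs (it crosses 0 near x = -2.4 and 1 near x = 2.4), so activations can fully saturate. A quick NumPy check of the same formula:

import numpy as np

for x in (-5.0, 0.0, 5.0):
  y = min(1.0, max(0.0, 1.2 / (1.0 + np.exp(-x)) - 0.1))
  print(x, y)  # -5.0 -> 0.0, 0.0 -> 0.5, 5.0 -> 1.0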