Import module at usr_dir, if provided.
def import_usr_dir(usr_dir):
"""Import module at usr_dir, if provided."""
if not usr_dir:
return
if usr_dir == INTERNAL_USR_DIR_PACKAGE:
# The package has been installed with pip under this name for Cloud ML
# Engine so just import it.
importlib.import_modul... |
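The snippet above is truncated; below is a minimal sketch of the non-internal branch, assuming the usual pattern of putting the containing directory on sys.path and importing the module by its final path component (the function name is illustrative, not the library's exact code).
import importlib
import os
import sys

def import_usr_dir_sketch(usr_dir):
  # Resolve the directory, import the module named after its last component,
  # then restore sys.path.
  dir_path = os.path.abspath(os.path.expanduser(usr_dir)).rstrip("/")
  containing_dir, module_name = os.path.split(dir_path)
  sys.path.insert(0, containing_dir)
  importlib.import_module(module_name)
  sys.path.pop(0)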
A set of basic hyperparameters.
def basic_params1():
"""A set of basic hyperparameters."""
return hparam.HParams(
# If the problem consists of variable-length sequences
# (see problem.batch_size_means_tokens()), then this is the number
# of tokens per batch per GPU or per TPU core. Otherwise, th... |
A basic range of hyperparameters.
def basic_range1(ranged_hparams):
"""A basic range of hyperparameters."""
rhp = ranged_hparams
rhp.set_discrete("batch_size", [1024, 2048, 4096])
rhp.set_discrete("num_hidden_layers", [1, 2, 3, 4, 5, 6])
rhp.set_discrete("hidden_size", [32, 64, 128, 256, 512], scale=rhp.LOG_... |
Check if name is in orig_ctr or in one of the other type containers.
def _check_reset_and_type_change(self, name, orig_ctr):
"""Check if name is in orig_ctr or in one of the other type containers."""
# Resetting a hyperparameter
if name in orig_ctr:
tf.logging.warning("Overwriting hparam %s", name)
... |
Converts to a list of dicts suitable for Cloud ML Engine hyperparameter tuning.
def to_parameter_specs(self, name_prefix=""):
"""Converts to a list of dicts suitable for Cloud ML Engine hyperparameter tuning."""
specs = []
for name, categories, _ in self._categorical_params.values():
spec = {
"parameterName": na... |
Create and register problems for the game.
Args:
game_name: str, one of the games in ATARI_GAMES, e.g. "bank_heist".
game_mode: the frame skip and sticky keys config.
Raises:
ValueError: if game_name or game_mode are wrong.
def register_game(game_name, game_mode="NoFrameskip-v4"):
"""Create and reg... |
Decodes a single observation from PNG.
def _decode_png(self, encoded_observation):
"""Decodes a single observation from PNG."""
return self._session.obj.run(
self._decoded_image_t.obj,
feed_dict={self._encoded_image_p.obj: encoded_observation}
) |
Encodes observations as PNG.
def _encode_observations(self, observations):
"""Encodes observations as PNG."""
return [
Observation(
self._session.obj.run(
self._encoded_image_t.obj,
feed_dict={self._decoded_image_p.obj: observation}
),
... |
Makes a step in all environments.
Does any preprocessing and records frames.
Args:
actions: Batch of actions.
Returns:
(obs, rewards, dones) - batches of observations, rewards and done flags
respectively.
Raises:
ValueError: when the data for current epoch has already been lo... |
Resets environments at given indices.
Does any preprocessing and adds rollouts to history.
Args:
indices: Indices of environments to reset.
Returns:
Batch of initial observations of reset environments.
Raises:
ValueError: when there's no current epoch.
def reset(self, indices=None... |
Additional data fields to store on disk and their decoders.
def extra_reading_spec(self):
"""Additional data fields to store on disk and their decoders."""
field_names = ("frame_number", "action", "reward", "done")
data_fields = {
name: tf.FixedLenFeature([1], tf.int64) for name in field_names
... |
Splits frames in the current epoch according to self.dataset_splits.
Rollouts can be broken on shard boundaries. This is desirable when we have
few long rollouts and we want to make sure we have data in the dev set.
def _split_current_epoch(self):
"""Splits frames in the current epoch according to self.data... |
List of pairs (split, paths) for the current epoch.
def splits_and_paths(self, data_dir):
"""List of pairs (split, paths) for the current epoch."""
filepath_fns = {
problem.DatasetSplit.TRAIN: self.training_filepaths,
problem.DatasetSplit.EVAL: self.dev_filepaths,
problem.DatasetSplit.T... |
Saves the current epoch rollouts to disk, split into train/dev sets.
def generate_data(self, data_dir, tmp_dir=None, task_id=-1):
"""Saves the current epoch rollouts to disk, split into train/dev sets."""
if not self._rollouts_by_epoch_and_split[self.current_epoch]:
# Data not loaded from disk.
sel... |
Sets the state that will be used on next reset.
def set_initial_state(self, initial_state, initial_frames):
"""Sets the state that will be used on next reset."""
self._initial_state = initial_state
self._initial_frames = initial_frames[:, -1, ...]
self._should_preprocess_on_reset = False |
Converts a NumPy image to a tf.Summary.Value object.
Args:
image: 3-D NumPy array.
tag: name for tf.Summary.Value for display in tensorboard.
Returns:
image_summary: A tf.Summary.Value object.
def image_to_tf_summary_value(image, tag):
"""Converts a NumPy image to a tf.Summary.Value object.
Args:... |
Optionally converts images from hook_args to image summaries.
Args:
hook_args: DecodeHookArgs namedtuple
Returns:
summaries: list of tf.Summary values if hook_args.decode_hpara
def convert_predictions_to_image_summaries(hook_args):
"""Optionally converts images from hooks_args to image summaries.
Ar... |
Image resize function used by quite a few image problems.
def resize_by_area(img, size):
"""Image resize function used by quite a few image problems."""
return tf.to_int64(
tf.image.resize_images(img, [size, size], tf.image.ResizeMethod.AREA)) |
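A brief usage sketch of resize_by_area; the 28x28 RGB input and TF 1.x-style execution are assumptions for illustration.
import numpy as np
import tensorflow as tf

img = tf.constant(np.random.randint(0, 256, size=(28, 28, 3)).astype(np.float32))
small = resize_by_area(img, 8)  # int64 Tensor of shape [8, 8, 3]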
Returns list of scaled images, one for each resolution.
Args:
image: Tensor of shape [height, height, num_channels].
resolutions: List of heights to which the image's height is resized.
resize_method: tf.image.ResizeMethod.
num_channels: Number of channels in image.
Returns:
List of Tensors, one fo... |
Returns list of scaled images, one for each resolution.
Resizes by skipping every nth pixel.
Args:
image: Tensor of shape [height, height, num_channels].
resolutions: List of heights to which the image's height is resized. The function
assumes VALID padding, so the original image's height must be divisib... |
Yield images encoded as pngs.
def encode_images_as_png(images):
"""Yield images encoded as pngs."""
if tf.executing_eagerly():
for image in images:
yield tf.image.encode_png(image).numpy()
else:
(height, width, channels) = images[0].shape
with tf.Graph().as_default():
image_t = tf.placeho... |
Generator for images that takes image and labels lists and creates pngs.
Args:
images: list of images given as [width x height x channels] numpy arrays.
labels: list of ints, same length as images.
Yields:
A dictionary representing the images with the following fields:
* image/encoded: the string ... |
Image augmentation: cropping, flipping, and color transforms.
def image_augmentation(images, do_colors=False, crop_size=None):
"""Image augmentation: cropping, flipping, and color transforms."""
if crop_size is None:
crop_size = [299, 299]
images = tf.random_crop(images, crop_size + [3])
images = tf.image.... |
Image augmentation suitable for CIFAR-10/100.
As described in https://arxiv.org/pdf/1608.06993v3.pdf (page 5).
Args:
images: a Tensor.
Returns:
Tensor of the same shape as images.
def cifar_image_augmentation(images):
"""Image augmentation suitable for CIFAR-10/100.
As described in https://arxiv.o... |
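A sketch of the recipe from the cited paper (pad/crop to 40x40, random 32x32 crop, random horizontal flip); the exact pad size and the TF 1.x API names are assumptions.
import tensorflow as tf

def cifar_image_augmentation_sketch(images):
  # Pad to 40x40, take a random 32x32 crop, then randomly flip horizontally.
  images = tf.image.resize_image_with_crop_or_pad(images, 40, 40)
  images = tf.random_crop(images, [32, 32, 3])
  images = tf.image.random_flip_left_right(images)
  return images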
Apply random horizontal and vertical shift to images.
This is the default data-augmentation strategy used on CIFAR in Glow.
Args:
image: a 3-D Tensor
wsr: Width shift range, as a float fraction of the width.
hsr: Height shift range, as a float fraction of the height.
Returns:
images: images trans... |
Get the common attention and feed-forward layers.
The returned layer functions will have the following signature:
y, extra_loss = fct(x)
extra_loss is set to 0.0 if the layer doesn't have extra loss.
If dp is provided, the layers will be distributed within the devices.
If MoE is to be used, both dp an... |
Adds the hparams used by get_standardized_layers.
def add_standard_attention_hparams(hparams):
"""Adds the hparams used by get_standardized_layers."""
# All hyperparameters ending in "dropout" are automatically set to 0.0
# when not in training mode.
# hparams used and which should have been defined outside (... |
Computes encdec attention loss between expected and actual attentions.
Args:
expected_attention_logits: Tensor storing the expected encoder-decoder
attention logits with shape [batch_size, target_length, input_length].
actual_attentions: Dictionary with actual attention logits for different
atten... |
Gets a bunch of sinusoids of different frequencies.
Each channel of the input Tensor is incremented by a sinusoid of a different
frequency and phase.
This allows attention to learn to use absolute and relative positions.
Timing signals should be added to some precursors of both the query and the
memory inpu... |
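A NumPy sketch of the sinusoidal timing signal described above (standard Transformer form; an even number of channels and min/max timescales of 1 and 1e4 are assumed).
import numpy as np

def timing_signal_1d_sketch(length, channels, min_timescale=1.0, max_timescale=1.0e4):
  position = np.arange(length, dtype=np.float32)
  num_timescales = channels // 2
  log_timescale_increment = (
      np.log(max_timescale / min_timescale) / max(num_timescales - 1, 1))
  inv_timescales = min_timescale * np.exp(
      -np.arange(num_timescales, dtype=np.float32) * log_timescale_increment)
  scaled_time = position[:, None] * inv_timescales[None, :]
  # Each channel gets a sinusoid of a different frequency and phase.
  signal = np.concatenate([np.sin(scaled_time), np.cos(scaled_time)], axis=1)
  return signal.reshape(1, length, channels)  # broadcastable over the batch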
Adds a bunch of sinusoids of different frequencies to a Tensor.
Each channel of the input Tensor is incremented by a sinusoid of a different
frequency and phase.
This allows attention to learn to use absolute and relative positions.
Timing signals should be added to some precursors of both the query and the
... |
Get n-dimensional embedding as the layer (vertical) timing signal.
Adds embeddings to represent the position of the layer in the tower.
Args:
channels: dimension of the timing signal
layer: layer num
num_layers: total number of layers
Returns:
a Tensor of timing signals [1, 1, channels].
def g... |
Add n-dimensional embedding as the layer (vertical) timing signal.
Adds embeddings to represent the position of the layer in the tower.
Args:
x: a tensor with shape [batch, length, depth]
layer: layer num
num_layers: total number of layers
Returns:
a Tensor the same shape as x.
def add_layer_t... |
Add sinusoids of different frequencies as layer (vertical) timing signal.
Args:
channels: dimension of the timing signal
layer: layer num
num_layers: total number of layers
Returns:
a Tensor of timing signals [1, 1, channels].
def get_layer_timing_signal_sinusoid_1d(channels, layer, num_layers):
... |
Add sinusoids of different frequencies as layer (vertical) timing signal.
Args:
x: a Tensor with shape [batch, length, channels]
layer: layer num
num_layers: total number of layers
Returns:
a Tensor the same shape as x.
def add_layer_timing_signal_sinusoid_1d(x, layer, num_layers):
"""Add sinus... |
Adds sinusoids of diff frequencies to a Tensor, with timing position given.
Args:
x: a Tensor with shape [batch, length, channels]
position: a Tensor with shape [batch, length]
min_timescale: a float
max_timescale: a float
Returns:
a Tensor the same shape as x.
def add_timing_signal_1d_given_... |
Adds a bunch of sinusoids of different frequencies to a Tensor.
Each channel of the input Tensor is incremented by a sinusoid of a different
frequency and phase in one of the positional dimensions.
This allows attention to learn to use absolute and relative positions.
Timing signals should be added to some pr... |
Adds positional embedding.
Args:
x: Tensor with shape [batch, length, depth].
max_length: int representing static maximum size of any dimension.
name: str representing name of the embedding tf.Variable.
positions: Tensor with shape [batch, length].
Returns:
Tensor of same shape as x.
def add_... |
Adds n-dimensional positional embedding.
The embeddings add to all positional dimensions of the tensor.
Args:
x: Tensor with shape [batch, p1 ... pn, depth]. It has n positional
dimensions, i.e., 1 for text, 2 for images, 3 for video, etc.
max_length: int representing static maximum size of any dime... |
Gets edge vectors for the edge types in the adjacency matrix.
Args:
adjacency_matrix: A [batch, num_nodes, num_nodes] tensor of ints.
num_edge_types: Number of different edge types
depth: Number of channels
name: a string
Returns:
A [batch, num_nodes, num_nodes, depth] vector of tensors
def ma... |
Calculate the length of the mask based on padding.
Args:
padding: a Tensor with shape [..., length].
Returns:
a Tensor with shape [...].
def padding_to_length(padding):
"""Calculate the length of mask based on padding.
Args:
padding: a Tensor with shape [..., length].
Returns:
a Tensor with sha... |
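One plausible realization, assuming padding is 1.0 at pad positions and 0.0 elsewhere, so the mask length is simply the count of non-pad positions.
import tensorflow as tf

def padding_to_length_sketch(padding):
  # Sum of (1 - padding) over the last axis counts the non-padding positions.
  return tf.cast(tf.reduce_sum(1.0 - padding, axis=-1), tf.int32)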
Create a bias tensor to be added to attention logits.
A position may attend to positions at most max_distance from it,
forward and backwards.
This does not actually save any computation.
Args:
length: int
max_backward: int, maximum distance backward to attend. Negative values
indicate unlimite... |
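An illustrative NumPy sketch of such a local bias; using -1e9 as the masked value is an assumption carried over from the other bias helpers here.
import numpy as np

def local_attention_bias_sketch(length, max_backward, max_forward):
  i = np.arange(length)[:, None]  # query positions
  j = np.arange(length)[None, :]  # memory positions
  visible = np.ones((length, length), dtype=bool)
  if max_backward >= 0:  # negative means unlimited backward attention
    visible &= (i - j) <= max_backward
  if max_forward >= 0:   # negative means unlimited forward attention
    visible &= (j - i) <= max_forward
  bias = np.where(visible, 0.0, -1e9).astype(np.float32)
  return bias.reshape(1, 1, length, length)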
Create a bias tensor to be added to attention logits.
Positions with the same segment_ids can see each other.
Args:
query_segment_id: a float `Tensor` with shape [batch, query_length].
memory_segment_id: a float `Tensor` with shape [batch, memory_length].
Returns:
a `Tensor` with shape [batch, 1, ... |
Create a bias tensor to be added to attention logits.
Args:
memory_padding: a float `Tensor` with shape [batch, memory_length].
Returns:
a `Tensor` with shape [batch, 1, 1, memory_length].
def attention_bias_ignore_padding(memory_padding):
"""Create an bias tensor to be added to attention logits.
A... |
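A minimal sketch, again assuming -1e9 as the masking constant: padding positions receive a large negative bias so softmax assigns them near-zero attention weight.
import tensorflow as tf

def attention_bias_ignore_padding_sketch(memory_padding):
  ret = memory_padding * -1e9
  return tf.expand_dims(tf.expand_dims(ret, axis=1), axis=1)  # [batch, 1, 1, memory_length]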
Inverse of attention_bias_ignore_padding().
Args:
attention_bias: a `Tensor` with shape [batch, 1, 1, memory_length], as
returned by attention_bias_ignore_padding().
cast_fn: function used to cast to output type.
Returns:
a Tensor with shape [batch, memory_length] with 1.0 in padding positions
... |
Create a bias tensor for prepend_mode="prepend_inputs_full_attention".
See prepend_inputs in common_hparams.py.
Produces a bias tensor to be used in self-attention.
This bias tensor allows for full connectivity in the "inputs" part of
the sequence and masked connectivity in the targets part.
Args:
pad... |
Bias for self-attention to encourage attention to close positions.
Args:
length: an integer scalar.
Returns:
a Tensor with shape [1, 1, length, length]
def attention_bias_proximal(length):
"""Bias for self-attention to encourage attention to close positions.
Args:
length: an integer scalar.
R... |
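A sketch of one such proximity bias; the -log(1 + |i - j|) form is an assumption, chosen to decay smoothly with distance while leaving the diagonal unpenalized.
import tensorflow as tf

def attention_bias_proximal_sketch(length):
  r = tf.cast(tf.range(length), tf.float32)
  diff = tf.expand_dims(r, 0) - tf.expand_dims(r, 1)  # pairwise position differences
  return tf.reshape(-tf.log1p(tf.abs(diff)), [1, 1, length, length])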
Generate a mask to prevent batches from attending to each other.
Args:
batch_coordinates_q: Int-like Tensor of shape [length_q, 1] containing the
coordinates of the batches
batch_coordinates_k: Int-like Tensor of shape [length_k, 1] containing the
coordinates of the batches. If None, do self-attent... |
Reshape x so that the last dimension becomes two dimensions.
The first of these two dimensions is n.
Args:
x: a Tensor with shape [..., m]
n: an integer.
Returns:
a Tensor with shape [..., n, m/n]
def split_last_dimension(x, n):
"""Reshape x so that the last dimension becomes two dimensions.
... |
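A sketch under the stated contract, assuming the last dimension m is statically known and divisible by n.
import tensorflow as tf

def split_last_dimension_sketch(x, n):
  m = x.shape.as_list()[-1]
  # Keep the leading (possibly dynamic) dimensions and split the last one.
  new_shape = tf.concat([tf.shape(x)[:-1], [n, m // n]], axis=0)
  return tf.reshape(x, new_shape)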
Reshape x so that the last two dimensions become one.
Args:
x: a Tensor with shape [..., a, b]
Returns:
a Tensor with shape [..., ab]
def combine_last_two_dimensions(x):
"""Reshape x so that the last two dimension become one.
Args:
x: a Tensor with shape [..., a, b]
Returns:
a Tensor with ... |
Reshape x so that the first two dimensions become one.
Args:
x: a Tensor with shape [a, b, ...]
Returns:
a Tensor with shape [ab, ...]
def combine_first_two_dimensions(x):
"""Reshape x so that the first two dimension become one.
Args:
x: a Tensor with shape [a, b, ...]
Returns:
a Tensor wi... |
Compute color image summary.
Args:
attn: a Tensor with shape [batch, num_heads, query_length, memory_length]
image_shapes: optional tuple of integer scalars.
If the query positions and memory positions represent the
pixels of flattened images, then pass in their dimensions:
(query_rows, q... |
Multi-head dot-product attention with sparsity.
For each attention head, the queries are partitioned into groups.
For each group, only a subset of the key-value pairs are considered.
The choices of groups are selected based on trained predictors of
the total attention given the group inclusion.
memory_targ... |
Make attention weights non-zero only on the top hard_attention_k ones.
def harden_attention_weights(weights, hard_attention_k):
"""Make attention weights non-zero only on the top hard_attention_k ones."""
# Subtract the top-kth weight and zero-out all lower ones.
# Note that currently in case of numerical ties it will... |
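A sketch of this hardening step: subtract the k-th largest weight per query and clip at zero, so only weights above that threshold survive; renormalizing the survivors is an assumption.
import tensorflow as tf

def harden_attention_weights_sketch(weights, hard_attention_k):
  kth_largest = tf.nn.top_k(weights, k=hard_attention_k).values[..., -1:]
  hard = tf.nn.relu(weights - kth_largest)  # zero out everything below the k-th weight
  return hard / (tf.reduce_sum(hard, axis=-1, keepdims=True) + 1e-9)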
Dot-product attention.
Args:
q: Tensor with shape [..., length_q, depth_k].
k: Tensor with shape [..., length_kv, depth_k]. Leading dimensions must
match with q.
v: Tensor with shape [..., length_kv, depth_v] Leading dimensions must
match with q.
bias: bias Tensor (see attention_bias())
... |
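A minimal sketch of the attention core described above; dropout, image summaries, and the 1/sqrt(depth_k) scaling (often applied by the caller) are omitted.
import tensorflow as tf

def dot_product_attention_sketch(q, k, v, bias=None):
  logits = tf.matmul(q, k, transpose_b=True)  # [..., length_q, length_kv]
  if bias is not None:
    logits += bias
  weights = tf.nn.softmax(logits, axis=-1)
  return tf.matmul(weights, v)  # [..., length_q, depth_v]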
Generates matrix of relative positions between inputs.
def _generate_relative_positions_matrix(length_q, length_k,
max_relative_position,
cache=False):
"""Generates matrix of relative positions between inputs."""
if not cache:
if l... |
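A NumPy sketch of the non-cached case: entry (i, j) is the relative distance j - i, clipped to max_relative_position and shifted to be non-negative so it can index an embedding table.
import numpy as np

def relative_positions_matrix_sketch(length, max_relative_position):
  r = np.arange(length)
  distance = r[None, :] - r[:, None]
  clipped = np.clip(distance, -max_relative_position, max_relative_position)
  return clipped + max_relative_position  # values in [0, 2 * max_relative_position]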
Generates tensor of size [1 if cache else length_q, length_k, depth].
def _generate_relative_positions_embeddings(length_q, length_k, depth,
max_relative_position, name,
cache=False):
"""Generates tensor of size [1 if cache else ... |
Relative position-aware dot-product attention inner calculation.
This batches matrix multiply calculations to avoid unnecessary broadcasting.
Args:
x: Tensor with shape [batch_size, heads, length or 1, length or depth].
y: Tensor with shape [batch_size, heads, length or 1, depth].
z: Tensor with shape... |
Calculate relative position-aware dot-product self-attention.
The attention calculation is augmented with learned representations for the
relative position between each element in q and each element in k and v.
Args:
q: a Tensor with shape [batch, heads, length, depth].
k: a Tensor with shape [batch, he... |
Helper to dot_product_self_attention_relative_v2.
Rearrange an attention logits or weights Tensor.
The dimensions of the input represent:
[batch, heads, query_position, memory_position - query_position + length - 1]
The dimensions of the output represent:
[batch, heads, query_position, memory_position]
... |
Calculate relative position-aware dot-product self-attention.
Only works for masked self-attention (no looking forward).
The attention calculation is augmented with learned representations for the
relative position between each element in q and each element in k and v.
Args:
q: a Tensor with shape [batch... |
Helper function for dot_product_unmasked_self_attention_relative_v2.
Rearrange an attention logits or weights Tensor.
The dimensions of the input represent:
[batch, heads, query_position, memory_position]
The dimensions of the output represent:
[batch, heads, query_position, memory_position - query_positio... |
Instantiate or retrieve relative embeddings, sliced according to length.
Use for unmasked case where the relative attention looks both left and right.
Args:
max_relative_position: an Integer for the number of entries in the relative
embedding, which corresponds to the max relative distance that is
... |
Calculate relative position-aware dot-product self-attention.
The attention calculation is augmented with learned representations for the
relative position between each element in q and each element in k and v.
Args:
q: a Tensor with shape [batch, heads, length, depth].
k: a Tensor with shape [batch, he... |
Helper function for dot_product_unmasked_self_attention_relative_2d.
def _matmul_with_relative_keys_2d(x, y, heads_share_relative_embedding):
"""Helper function for dot_product_unmasked_self_attention_relative_2d."""
if heads_share_relative_embedding:
ret = tf.einsum("bhxyd,md->bhxym", x, y)
else:
ret = ... |
Calculate relative position unmasked dot-product self-attention 2d.
The attention calculation is augmented with learned representations for the
relative position between each element in q and each element in k and v in
height and width dimensions. For query index (i, j) and key index (l, m),
the logit is q_i k... |
Helper function for local 2d attention.
Takes a tensor of [batch, heads, num_h_blocks, num_w_blocks,
height, width, depth] and returns two tensors which contain every alternate
position along the width
Args:
x_left_right_blocks: A [batch, num_h_blocks, num_w_blocks,
height, wi... |
Helper function. Assumes that memory_flange is half of query sizes.
This function splits the tensor of width 'n' into two halves, where the
first half gets the width indices 0, 2, 4.. and the second half gets the
width indices 3, 5, ... We also fuse two blocks along the h dimension.
Args:
x: a 6-d tensor.... |
Helper function for local 2d attention.
Args:
x: a [batch, height, width, depth] tensor
block_h: An integer. block height
block_w: An integer. block width
Returns:
a [batch, num_heads, height/block_h, width/block_w, depth] tensor
def _extract_blocks(x, block_h, block_w):
"""Helper function for ... |
Stitches together the local 2d memory blocks.
Args:
x: a [batch, height, width, depth] tensor
query_shape: 2-d integer list of query shape
memory_flange: 2-d integer list of memory flanges
Returns:
x: A [batch, num_h_blocks, num_w_blocks,
query_shape[0]+2*memory_flange[0],query_shape[1]+... |
Gathers memory blocks around query blocks. The flange is half of the query size.
Only works if memory flanges are half of query sizes.
Args:
x: a [batch, height, width, depth] tensor
query_shape: 2-d integer list of query shape
memory_flange: 2-d integer list of memory flanges
Returns:
x: A [batch, num... |
Calculate unmasked dot-product local self-attention 2d on tpu.
Args:
q: a Tensor with shape [batch, heads, height, width, depth].
k: a Tensor with shape [batch, heads, height, width, depth].
v: a Tensor with shape [batch, heads, height, width, depth].
bias: bias Tensor.
max_relative_position: an ... |
Calculate simple unmasked dot-product local self-attention 2d on tpu.
The query, key, and value blocks are the same. We do not do a second linear
transformation after computing the values
Args:
x: a Tensor with shape [batch, height, width, depth].
bias: bias Tensor.
total_key_depth: the dimensions o... |
Attention to the source and a neighborhood to the left within a block.
The sequence is divided into blocks of length block_length. Attention for a
given query position can only see memory positions less than or equal to the
query position in the corresponding block.
Args:
q: a Tensor with shape [batch, he... |
Converts tensor from relative to absolute indexing for local attention.
Args:
x: a Tensor of shape [batch (or batch*num_blocks), heads,
length, 2 * length - 1]
Returns:
A Tensor of shape [batch (or batch*num_blocks), heads, length, length-1]
def _relative_position_to_absolute_po... |
Attention to the source position and a neighborhood to the left of it.
The sequence is divided into blocks of length block_length. Attention for a
given query position can only see memory positions less than or equal to the
query position, in the corresponding block and the previous block.
Args:
q: a Tens... |
Helper function to create a local version of the keys or values for 1d.
def _make_local_block(x, depth, batch, heads, num_blocks, block_length):
"""Helper function to create a local version of the keys or values for 1d."""
prev_block = tf.slice(x, [0, 0, 0, 0, 0],
[-1, -1, num_blocks - 1, -... |
Masked local 1d attention with relative positions.
The sequence is divided into blocks of length block_size.
Attention for a given query position can only see memory positions
less than or equal to the query position, in the corresponding block
and the previous block.
If mask_right is True, then a target po... |
Strided block local self-attention.
The sequence is divided into blocks of length block_length. Attention for a
given query position can see all memory positions in the corresponding block
and filter_width many positions to the left and right of the block.
Args:
q: a Tensor with shape [batch, heads, lengt... |
Reshapes input by splitting its length over blocks of memory_block_size.
Args:
x: a Tensor with shape [batch, heads, length, depth]
x_shape: tf.TensorShape of x.
memory_block_size: Integer which divides length.
Returns:
Tensor with shape
[batch, heads, length // memory_block_size, memory_block... |
Dilated self-attention.
Args:
q: a Tensor with shape [batch, heads, length, depth]
k: a Tensor with shape [batch, heads, length, depth]
v: a Tensor with shape [batch, heads, length, depth]
query_block_size: an integer indicating size of query block
memory_block_size: an integer indicating the siz... |
Gathers blocks with gaps in between.
Args:
x: Tensor of shape [length, batch, heads, depth]
num_memory_blocks: how many memory blocks to look in "direction". Each will
be separated by gap_size.
gap_size: an integer indicating the gap size
query_block_size: an integer indicating size of query bl... |
Dilated self-attention. TODO(avaswani): Try it and write a paper on it.
Args:
q: a Tensor with shape [batch, heads, length, depth]
k: a Tensor with shape [batch, heads, length, depth]
v: a Tensor with shape [batch, heads, length, depth]
query_block_size: an integer
memory_block_size: an integer i... |
Strided block local self-attention.
The 2-D sequence is divided into 2-D blocks of shape query_shape. Attention
for a given query position can only see memory positions less than or equal to
the query position. The memory positions are the corresponding block with
memory_flange many positions to add to the hei... |
Pads x so that its height and width are multiples of block_shape.
Args:
x: a [batch, heads, h, w, depth] or [batch, h, w, depth] tensor
block_shape: a 2-d list of integer shapes
Returns:
padded_x: a [batch, heads, h, w, depth] or [batch, h, w, depth] tensor
def pad_to_multiple_2d(x, block_shape):
"""Making sure x is a multipl... |
Reshapes a tensor between dimensions i and j.
def reshape_range(tensor, i, j, shape):
"""Reshapes a tensor between dimensions i and j."""
t_shape = common_layers.shape_list(tensor)
target_shape = t_shape[:i] + shape + t_shape[j:]
return tf.reshape(tensor, target_shape) |
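A brief usage sketch of the helper above (shapes are made up; assumes the surrounding module's imports).
x = tf.zeros([2, 4, 6, 8])
flat = reshape_range(x, 1, 3, [4 * 6])  # collapses dims 1..2, giving shape [2, 24, 8]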
Gathers flattened blocks from x.
def gather_blocks_2d(x, indices):
"""Gathers flattened blocks from x."""
x_shape = common_layers.shape_list(x)
x = reshape_range(x, 2, 4, [tf.reduce_prod(x_shape[2:4])])
# [length, batch, heads, dim]
x_t = tf.transpose(x, [2, 0, 1, 3])
x_new = tf.gather(x_t, indices)
# re... |
Scatters blocks from x into shape with indices.
def scatter_blocks_2d(x, indices, shape):
"""Scatters blocks from x into shape with indices."""
x_shape = common_layers.shape_list(x)
# [length, batch, heads, dim]
x_t = tf.transpose(
tf.reshape(x, [x_shape[0], x_shape[1], -1, x_shape[-1]]), [2, 0, 1, 3])
... |
Getting gather indices.
def gather_indices_2d(x, block_shape, block_stride):
"""Getting gather indices."""
# making an identity matrix kernel
kernel = tf.eye(block_shape[0] * block_shape[1])
kernel = reshape_range(kernel, 0, 1, [block_shape[0], block_shape[1], 1])
# making indices [1, h, w, 1] to apply convs
... |
Creates a mask for 2d block raster scan.
The query mask can look to the left, top left, top, and top right, but
not to the right. Inside the query, we have the standard raster scan
masking.
Args:
query_shape: A tuple of ints (query_height, query_width)
memory_flange: A tuple of ints
(memory_flang... |
Get the memory regions that surround a 2d query.
The memory regions will be the left and top right.
Args:
x: A tensor with shape [batch, heads, height, width, depth]
query_block_shape: a 2-d tuple of integers
memory_flange: a 2-d tuple of integers
q_indices: a tensor of indices for each of the c... |
Get right shifted blocks for masked local attention 2d.
Args:
x: A tensor with shape [batch, heads, height, width, depth]
indices: The indices to gather blocks
Returns:
x_shifted: a tensor of extracted blocks, each block right shifted along
length.
def get_shifted_center_blocks(x, indices):
"... |
Right shifts once in every block.
Args:
x: a tensor of shape [batch, height, width, depth]
query_shape: A 2d tuple of ints
name: a string
Returns:
output: a tensor of the same shape as x
def right_shift_blockwise(x, query_shape, name=None):
"""Right shifts once in every block.
Args:
x: a... |
Strided block local self-attention.
Each position in a query block can attend to all the generated queries in
the query block, which are generated in raster scan, and positions that are
generated to the left and top. The shapes are specified by query shape and
memory flange. Note that if you're using this func... |
Computes attention component (query, key or value).
Args:
antecedent: a Tensor with shape [batch, length, channels]
total_depth: an integer
filter_width: An integer specifying how wide you want the attention
component to be.
padding: One of "VALID", "SAME" or "LEFT". Default is VALID: No paddi... |
Computes query, key and value.
Args:
query_antecedent: a Tensor with shape [batch, length_q, channels]
memory_antecedent: a Tensor with shape [batch, length_m, channels]
total_key_depth: an integer
total_value_depth: an integer
q_filter_width: An integer specifying how wide you want the query to ... |
Multihead scaled-dot-product attention with input/output transformations.
Args:
query_antecedent: a Tensor with shape [batch, length_q, channels]
memory_antecedent: a Tensor with shape [batch, length_m, channels] or None
bias: bias Tensor (see attention_bias())
total_key_depth: an integer
total_v... |
2d Multihead scaled-dot-product attention with input/output transformations.
Args:
query_antecedent: a Tensor with shape [batch, h, w, depth_k]
memory_antecedent: a Tensor with shape [batch, h, w, depth_k]
total_key_depth: an integer
total_value_depth: an integer
output_depth: an integer
num_he... |
Self-attention feedforward layer.
We use self-attention to do feedforward computations. We apply this function
positionwise where for each position, we linearly transform the output to have
depth filter_depth, and break up the result depth-wise into num_parts
contiguous parts. The parts self-attend, we concate... |
Attention over parameters.
We use the same multi-headed attention as in the other layers, but the memory
keys and values are model parameters. There is no linear transformation on
the keys or values.
We are also a bit more careful about memory usage, since the number of
memory positions may be very large.
... |
Return a tensor with the given shape containing the coordinate along the given axis.
Args:
shape: a Tensor representing the shape of the output Tensor
axis: an integer
Returns:
A tensor with shape `shape` and type tf.int32, where each element is its
coordinate along the given axis.
def coordinate_tensor(shape, a... |
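One way to realize this, shown as an illustrative sketch rather than the library's exact code; it assumes axis is a non-negative Python int or scalar Tensor.
import tensorflow as tf

def coordinate_tensor_sketch(shape, axis):
  r = tf.range(shape[axis])
  # Shape with -1 on `axis` and 1 elsewhere, so the range broadcasts into place.
  r_shape = tf.one_hot(axis, tf.size(shape), on_value=-1, off_value=1,
                       dtype=tf.int32)
  return tf.zeros(shape, dtype=tf.int32) + tf.reshape(r, r_shape)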