Create a gym env optionally with a time limit and maxskip wrapper. NOTE: The returned env may already be wrapped with TimeLimit! Args: name: `str` - base name of the gym env to make. rl_env_max_episode_steps: `int` or None - Using any value < 0 returns the env as-is, otherwise we impose the requeste...
Registers the class in Gym and returns the registered name and the env. def register_gym_env(class_entry_point, version="v0", kwargs=None): """Registers the class in Gym and returns the registered name and the env.""" split_on_colon = class_entry_point.split(":") assert len(split_on_colon) == 2 class_name = ...
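A minimal sketch of Gym registration via the public gym.envs.registration API; the id and entry point below are illustrative stand-ins, not values taken from this module.

from gym.envs.registration import register
import gym

# The entry point uses the "module.path:ClassName" form that
# split_on_colon above expects.
register(
    id="MyCustomEnv-v0",  # hypothetical name; Gym requires a -vN suffix
    entry_point="my_package.envs:MyCustomEnv",  # hypothetical class
    kwargs={"difficulty": 1},  # forwarded to the class constructor
)
env = gym.make("MyCustomEnv-v0")  # resolves the entry point lazily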
Repeat action, sum reward, and max over last observations. def step(self, action): """Repeat action, sum reward, and max over last observations.""" total_reward = 0.0 done = None for i in range(self._skip): obs, reward, done, info = self.env.step(action) if i == self._skip - 2: self...
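A self-contained numpy sketch of the same max-and-skip pattern, assuming a gym-style env with array observations; the skip of 4 is the common Atari default, not a value read from this snippet.

from collections import deque
import numpy as np

def max_and_skip_step(env, action, skip=4):
    """Repeat `action` `skip` times, sum rewards, max over the last two frames."""
    obs_buffer = deque(maxlen=2)  # keeps only the two most recent frames
    total_reward = 0.0
    done = False
    info = {}
    for _ in range(skip):
        obs, reward, done, info = env.step(action)
        obs_buffer.append(obs)
        total_reward += reward
        if done:
            break
    # The element-wise max removes single-frame flicker (e.g. Atari sprites).
    max_frame = np.max(np.stack(obs_buffer), axis=0)
    return max_frame, total_reward, done, info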
Log and possibly re-raise errors during import. def _handle_errors(errors): """Log and possibly re-raise errors during import.""" if not errors: return log_all = True # pylint: disable=unused-variable err_msg = "T2T: skipped importing {num_missing} data_generators modules." print(err_msg.format(nu...
Create HParams with data_dir and problem hparams, if kwargs provided. def create_hparams(hparams_set, hparams_overrides_str="", data_dir=None, problem_name=None, hparams_path=None): """Create HParams with data_dir and problem hparams, if kwa...
Loading hparams from json; can also start from hparams if specified. def create_hparams_from_json(json_path, hparams=None): """Loading hparams from json; can also start from hparams if specified.""" tf.logging.info("Loading hparams from existing json %s" % json_path) with tf.gfile.Open(json_path, "r") as f: ...
Add problem hparams for the problems. def add_problem_hparams(hparams, problem_name_or_instance): """Add problem hparams for the problems.""" if isinstance(problem_name_or_instance, problem_lib.Problem): problem = problem_name_or_instance else: problem = registry.problem(problem_name_or_instance) p_hpa...
Loads examples from the tsv file. Args: tmp_dir: temp directory. prop_train: proportion of the train data prop_val: proportion of the validation data Returns: All examples in the dataset plus train, test, and development splits. def load_examples(tmp_dir, prop_train=0.09, prop_val=0.01): """Loa...
Download and extract CIFAR to directory unless it is there. def _get_cifar(directory, url): """Download and extract CIFAR to directory unless it is there.""" filename = os.path.basename(url) path = generator_utils.maybe_download(directory, filename, url) tarfile.open(path, "r:gz").extractall(directory)
Image generator for CIFAR-10 and 100. Args: cifar_version: string; one of "cifar10" or "cifar100" tmp_dir: path to temporary storage directory. training: a Boolean; if true, we use the train set, otherwise the test set. how_many: how many images and labels to generate. start_from: from which imag...
HParams for PPO base. def rlmb_ppo_base(): """HParams for PPO base.""" hparams = _rlmb_base() ppo_params = dict( base_algo="ppo", base_algo_params="ppo_original_params", # Number of real environments to train on simultaneously. real_batch_size=1, # Number of simulated environments t...
rlmb_dqn_base params. def rlmb_dqn_base(): """rlmb_dqn_base params.""" hparams = _rlmb_base() simulated_rollout_length = 10 dqn_params = dict( base_algo="dqn", base_algo_params="dqn_original_params", real_batch_size=1, simulated_batch_size=16, dqn_agent_generates_trainable_dones=F...
Base setting but quicker with only 2 epochs. def rlmb_ppo_quick(): """Base setting but quicker with only 2 epochs.""" hparams = rlmb_ppo_base() hparams.epochs = 2 hparams.model_train_steps = 25000 hparams.ppo_epochs_num = 700 hparams.ppo_epoch_length = 50 return hparams
Base setting with a stochastic next-frame model. def rlmb_base_stochastic(): """Base setting with a stochastic next-frame model.""" hparams = rlmb_base() hparams.initial_epoch_train_steps_multiplier = 5 hparams.generative_model = "next_frame_basic_stochastic" hparams.generative_model_params = "next_frame_bas...
Base setting with stochastic discrete model. def rlmb_base_stochastic_discrete(): """Base setting with stochastic discrete model.""" hparams = rlmb_base() hparams.learning_rate_bump = 1.0 hparams.grayscale = False hparams.generative_model = "next_frame_basic_stochastic_discrete" hparams.generative_model_pa...
Long setting with stochastic discrete model & deterministic sim starts. def rlmb_long_stochastic_discrete_simulation_deterministic_starts(): """Long setting with stochastic discrete model & deterministic sim starts.""" hparams = rlmb_base_stochastic_discrete() hparams.generative_model_params = "next_frame_basic_...
Long setting with stochastic discrete model, changed ppo steps. def rlmb_long_stochastic_discrete_100steps(): """Long setting with stochastic discrete model, changed ppo steps.""" hparams = rlmb_long_stochastic_discrete() hparams.ppo_epoch_length = 100 hparams.simulated_rollout_length = 100 hparams.simulated...
Long setting with stochastic discrete model, changed ppo steps. def rlmb_long_stochastic_discrete_25steps(): """Long setting with stochastic discrete model, changed ppo steps.""" hparams = rlmb_long_stochastic_discrete() hparams.ppo_epoch_length = 25 hparams.simulated_rollout_length = 25 hparams.simulated_ba...
Base setting with stochastic discrete model. def rlmb_base_stochastic_discrete_noresize(): """Base setting with stochastic discrete model.""" hparams = rlmb_base() hparams.generative_model = "next_frame_basic_stochastic_discrete" hparams.generative_model_params = "next_frame_basic_stochastic_discrete" hparam...
Base setting with sv2p as world model. def rlmb_base_sv2p(): """Base setting with sv2p as world model.""" hparams = rlmb_base() hparams.learning_rate_bump = 1.0 hparams.generative_model = "next_frame_sv2p" hparams.generative_model_params = "next_frame_sv2p_atari" return hparams
Parameters to override for tiny setting excluding agent-related hparams. def _rlmb_tiny_overrides(): """Parameters to override for tiny setting excluding agent-related hparams.""" return dict( epochs=1, num_real_env_frames=128, model_train_steps=2, max_num_noops=1, eval_max_num_noops=...
Tiny set for testing. def rlmb_ppo_tiny(): """Tiny set for testing.""" hparams = rlmb_ppo_base() hparams = hparams.override_from_dict(_rlmb_tiny_overrides()) update_hparams(hparams, dict( ppo_epochs_num=2, ppo_epoch_length=10, real_ppo_epoch_length=36, real_ppo_effective_num_agents=2, ...
Tiny set for testing. def rlmb_dqn_tiny(): """Tiny set for testing.""" hparams = rlmb_dqn_base() hparams = hparams.override_from_dict(_rlmb_tiny_overrides()) update_hparams(hparams, dict( simulated_rollout_length=2, dqn_time_limit=2, dqn_num_frames=128, real_dqn_replay_buffer_replay_cap...
Tiny setting with a stochastic next-frame model. def rlmb_tiny_stochastic(): """Tiny setting with a stochastic next-frame model.""" hparams = rlmb_ppo_tiny() hparams.epochs = 1 # Too slow with 2 for regular runs. hparams.generative_model = "next_frame_basic_stochastic" hparams.generative_model_params = "nex...
Tiny setting with a recurrent next-frame model. def rlmb_tiny_recurrent(): """Tiny setting with a recurrent next-frame model.""" hparams = rlmb_ppo_tiny() hparams.epochs = 1 # Too slow with 2 for regular runs. hparams.generative_model = "next_frame_basic_recurrent" hparams.generative_model_params = "next_fr...
Tiny setting with a tiny sv2p model. def rlmb_tiny_sv2p(): """Tiny setting with a tiny sv2p model.""" hparams = rlmb_ppo_tiny() hparams.generative_model = "next_frame_sv2p" hparams.generative_model_params = "next_frame_sv2p_tiny" hparams.grayscale = False return hparams
Grid over games and frames, and 5 runs each for variance. def rlmb_grid(rhp): """Grid over games and frames, and 5 runs each for variance.""" rhp.set_categorical("loop.game", ["breakout", "pong", "freeway"]) base = 100000 medium = base // 2 small = medium // 2 rhp.set_discrete("loop.num_real_env_frames", [...
Merge multiple HParams into one with scopes. def merge_unscoped_hparams(scopes_and_hparams): """Merge multiple HParams into one with scopes.""" merged_values = {} for (scope, hparams) in scopes_and_hparams: for key, value in six.iteritems(hparams.values()): scoped_key = "%s.%s" % (scope, key) mer...
Split single HParams with scoped keys into multiple. def split_scoped_hparams(scopes, merged_hparams): """Split single HParams with scoped keys into multiple.""" split_values = {scope: {} for scope in scopes} merged_values = merged_hparams.values() for scoped_key, value in six.iteritems(merged_values): sco...
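A plain-dict sketch of the merge/split round trip performed by the two functions above, assuming scope names never contain a dot.

def merge_unscoped(scopes_and_dicts):
    merged = {}
    for scope, values in scopes_and_dicts:
        for key, value in values.items():
            merged["%s.%s" % (scope, key)] = value
    return merged

def split_scoped(scopes, merged):
    split = {scope: {} for scope in scopes}
    for scoped_key, value in merged.items():
        scope, key = scoped_key.split(".", 1)  # key itself may contain dots
        split[scope][key] = value
    return split

merged = merge_unscoped([("loop", {"epochs": 2}), ("model", {"lr": 0.1})])
assert split_scoped(["loop", "model"], merged) == {
    "loop": {"epochs": 2}, "model": {"lr": 0.1}}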
Create HParams suitable for training loop from scoped HParams. Args: scoped_overrides: HParams, with keys all scoped by one of HP_SCOPES. These parameters are overrides for the base HParams created by create_loop_hparams. trial_id: str, trial identifier. This is used to register unique HParams ...
Get mapping from keyboard keys to actions. Required by gym.utils.play in environment or top level wrapper. Returns: { Unicode code point for keyboard key: action (formatted for step()), ... } def get_keys_to_action(self): """Get mapping from keyboard keys to actions. Requ...
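An illustrative keys-to-action table in the format gym.utils.play consumes: tuples of currently pressed key codes map to one action. The specific bindings are made up, not the wrapper's.

keys_to_action = {
    (): 0,                    # no key pressed -> NOOP
    (ord("w"),): 2,           # hypothetical binding for UP
    (ord("s"),): 5,           # hypothetical binding for DOWN
    (ord("w"), ord("s")): 0,  # conflicting presses fall back to NOOP
}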
Pass action to underlying environment(s) or perform special action. def step(self, action): """Pass action to underlying environment(s) or perform special action.""" # Special codes if action in self._player_actions(): envs_step_tuples = self._player_actions()[action]() elif self._wait and action...
Expand observation array with additional information header (top rows). Args: ob: observation reward: reward to be included in header. cumulative_reward: total cumulative reward to be included in header. Returns: Expanded observation array. def _augment_observation(self, ob, reward, cu...
Construct observation, return usual step tuple. Args: envs_step_tuples: tuples. Returns: Step tuple: ob, reward, done, info ob: concatenated images [simulated observation, real observation, difference], with additional information in the header. reward: real environment rewa...
Reset simulated and real environments. def reset(self): """Reset simulated and real environments.""" self._frame_counter = 0 ob_real = self.real_env.reset() # Initialize simulated environment with frames from real one. self.sim_env.add_to_initial_stack(ob_real) for _ in range(3): ob_real,...
Perform step(action) on environments and update initial_frame_stack. def _step_envs(self, action): """Perform step(action) on environments and update initial_frame_stack.""" self._frame_counter += 1 real_env_step_tuple = self.real_env.step(action) sim_env_step_tuple = self.sim_env.step(action) self...
Augment observation, return usual step tuple. def _player_step_tuple(self, envs_step_tuples): """Augment observation, return usual step tuple.""" ob, reward, done, info = envs_step_tuples["env"] ob = self._augment_observation(ob, reward, self.cumulative_reward) return ob, reward, done, info
Compute time first- and second-order derivative channels. Args: filterbanks: float32 tensor with shape [batch_size, len, num_bins, 1] name: scope name Returns: float32 tensor with shape [batch_size, len, num_bins, 3] def add_delta_deltas(filterbanks, name=None): """Compute time first- and second-orde...
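A numpy sketch of the delta / delta-delta idea on a single [time, num_bins] example, using the standard 5-tap regression filter; the TF version above expresses the same filter as a convolution over batched input.

import numpy as np

def add_delta_deltas_np(filterbanks):
    """filterbanks: [time, num_bins] float32 -> [time, num_bins, 3]."""
    # Regression filter: sum_n n * (x[t+n] - x[t-n]) / (2 * sum_n n^2), N = 2.
    delta_filter = np.array([-2, -1, 0, 1, 2], dtype=np.float32) / 10.0
    # np.convolve flips its kernel, so reverse it to get a correlation.
    correlate = lambda x: np.convolve(x, delta_filter[::-1], mode="same")
    delta = np.apply_along_axis(correlate, 0, filterbanks)
    delta_delta = np.apply_along_axis(correlate, 0, delta)
    return np.stack([filterbanks, delta, delta_delta], axis=-1)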
Implement mel-filterbank extraction using tf ops. Args: waveforms: float32 tensor with shape [batch_size, max_len] sample_rate: sampling rate of the waveform dither: stddev of Gaussian noise added to waveform to prevent quantization artefacts preemphasis: waveform high-pass filtering constant ...
Plays the env problem by randomly sampling actions for `num_steps`. def play_env_problem_randomly(env_problem, num_steps): """Plays the env problem by randomly sampling actions for `num_steps`.""" # Reset all environments. env_problem.reset() # Play all environments, sampling ran...
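A sketch of the random-play loop against a single gym-style env, assuming action_space.sample() and a manual reset when an episode ends; the batched EnvProblem version above follows the same shape.

def play_randomly(env, num_steps):
    env.reset()
    for _ in range(num_steps):
        action = env.action_space.sample()  # uniform random action
        _, _, done, _ = env.step(action)
        if done:
            env.reset()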
Generates samples of text from the provided vocabulary. Args: plain_vocab: vocabulary. distribution: sampling distribution over the vocabulary. train_samples: number of samples for training. length: length of each sample. Returns: train_indices (np.array of Integers): random integers for training. shape = [num_samples, length] test_indi...
Encrypt plain text with a single shift layer. Args: plaintext (list of list of Strings): a list of plain text to encrypt. plain_vocab (list of Integer): the unique vocabulary symbols in use. shift (Integer): number of positions to shift; shifts to the right if positive. Returns: ciphertext (list of Strings): ...
Encrypt plain text with given key. Args: plaintext (list of list of Strings): a list of plain text to encrypt. plain_vocab (list of Integer): the unique vocabulary symbols in use. key (list of Integer): key used to encrypt via the Vigenere table. Returns: ciphertext (list of Strings): encrypted plain te...
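An illustrative pair of encoders over integer IDs showing the two schemes: a Caesar-style shift and a Vigenere cipher that cycles a key. Names and signatures are assumptions, not this module's API.

def shift_encrypt(plaintext_ids, vocab_size, shift):
    # Caesar cipher: shift every symbol by the same amount, modulo vocab.
    return [(i + shift) % vocab_size for i in plaintext_ids]

def vigenere_encrypt(plaintext_ids, vocab_size, key):
    # Vigenere cipher: position t is shifted by key[t mod len(key)].
    return [(i + key[t % len(key)]) % vocab_size
            for t, i in enumerate(plaintext_ids)]

assert shift_encrypt([0, 1, 2], vocab_size=5, shift=3) == [3, 4, 0]
assert vigenere_encrypt([0, 0, 0, 0], vocab_size=5, key=[1, 2]) == [1, 2, 1, 2]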
A stack of super_lm layers. Args: inputs: a list of Tensors attention_bias: list of bias Tensor for self-attention (see common_attention.attention_bias()) hparams: hyperparameters for model mp: a Parallelism object padding: a string Returns: y: a list of Tensors extra_loss: an op...
Set of hyperparameters. def super_lm_base(): """Set of hyperparameters.""" hparams = common_hparams.basic_params1() hparams.hidden_size = 512 hparams.moe_hidden_sizes = "512" hparams.batch_size = 16384 hparams.max_length = 0 # All hyperparameters ending in "dropout" are automatically set to 0.0 # when ...
Add mixture of experts with ~1B params. def super_lm_moe(): """Add mixture of experts with ~1B params.""" hparams = super_lm_base() hparams.layers = ( ("n,att,m,d,a," "n,moe,m,d,a,") * 4 + "n,ffn,d") hparams.moe_num_experts = 32 hparams.moe_hidden_sizes = "1024" return hparams
Series of architectural experiments on Translation. # run on 8-core setup 119M params, einsum=0.95e13 Returns: a hparams def xmoe_tr_dense_2k(): """Series of architectural experiments on Translation. # run on 8-core setup 119M params, einsum=0.95e13 Returns: a hparams """ hparams = mtf_...
Mixture of experts (16 experts). 623M Params, einsum=1.09e13 Returns: a hparams def xmoe_tr_1d(): """Mixture of experts (16 experts). 623M Params, einsum=1.09e13 Returns: a hparams """ hparams = xmoe_tr_dense_2k() hparams.encoder_layers = ["self_att", "moe_1d"] * 4 hparams.decoder_layer...
Mixture of experts (16 experts). 623M Params, einsum=1.09e13 Returns: a hparams def xmoe_tr_2d(): """Mixture of experts (16 experts). 623M Params, einsum=1.09e13 Returns: a hparams """ hparams = xmoe_tr_dense_2k() hparams.mesh_shape = "b0:2;b1:4" hparams.outer_batch_size = 4 hparams.lay...
Series of architectural experiments on cheap language models. For all of these architectures, we run on languagemodel_lm1b8k_packed for 32000 steps. All log-perplexities are per-token; multiply by 1.298 for per-word. Results: model params(M) einsum alltoall mxu-util log-ppl xmoe_dense_4k ...
Mixture of experts (16 experts). def xmoe_top_2(): """Mixture of experts (16 experts).""" hparams = xmoe_dense_4k() moe.set_default_moe_hparams(hparams) hparams.mesh_shape = "all:8" hparams.layout = "batch:all;experts:all" return hparams
Two-dimensional hierarchical mixture of 16 experts. def xmoe_2d(): """Two-dimensional hierarchical mixture of 16 experts.""" hparams = xmoe_top_2() hparams.decoder_layers = ["att", "hmoe"] * 4 hparams.mesh_shape = "b0:2;b1:4" hparams.outer_batch_size = 4 hparams.layout = "outer_batch:b0;inner_batch:b1,expe...
Series of architectural experiments on language modeling. Larger models than the ones above. All models are trained on sequences of 1024 tokens. We assume infinite training data, so no dropout necessary. We process 2^36 tokens in training = 524288 steps at batch size 128 TODO(noam): find a large enough da...
Model incorporating mixture-of-experts and local-attention. ~6B parameters 32 experts in 3 hierarchical moe layers. Returns: a hparams def xmoe2_v1(): """Model incorporating mixture-of-experts and local-attention. ~6B parameters 32 experts in 3 hierarchical moe layers. Returns: a hparams ...
128 experts, ~25B params - Train for 131072 steps on 8x8. def xmoe2_v1_x128(): """128 experts, ~25B params - Train for 131072 steps on 8x8.""" hparams = xmoe2_v1() hparams.moe_num_experts = [16, 8] hparams.outer_batch_size = 8 hparams.mesh_shape = "b0:8;b1:16" hparams.batch_size = 512 hparams.learning_ra...
Test on local cpu. def xmoe2_tiny(): """Test on local cpu.""" hparams = xmoe2_v1() hparams.decoder_layers = [ "local_att", "att", "compressed_att", "drd", "hmoe"] hparams.d_model = 128 hparams.moe_hidden_size = 512 hparams.outer_batch_size = 0 hparams.batch_size = 2 hparams.mesh_shape = "" hpar...
With sequence length 4096. def xmoe2_v1_l4k(): """With sequence length 4096.""" hparams = xmoe2_v1() hparams.batch_size = 32 hparams.max_length = 4096 hparams.split_to_length = 4096 hparams.reshape_logits_hack = True return hparams
With sequence length 4096. def xmoe2_v1_l4k_local_only(): """With sequence length 4096.""" hparams = xmoe2_v1_l4k() hparams.decoder_layers = [ "local_att" if l == "att" else l for l in hparams.decoder_layers] return hparams
With sequence length 4096. def xmoe2_v1_l4k_global_only(): """With sequence length 4096.""" hparams = xmoe2_v1_l4k() hparams.decoder_layers = [ "att" if l == "local_att" else l for l in hparams.decoder_layers] return hparams
With compressed attention. def xmoe2_v1_l4k_compressed_c4(): """With compressed attention.""" hparams = xmoe2_v1_l4k() hparams.decoder_layers = [ "compressed_att" if l == "att" else l for l in hparams.decoder_layers] hparams.compression_factor = 4 return hparams
Set of architectural experiments - language model on wikipedia on a 2x2. 1 epoch = ~180k steps at batch size 32 - we may never finish an epoch! Returns: a hparams def wiki_2x2_base(): """Set of architectural experiments - language model on wikipedia on a 2x2. 1 epoch = ~180k steps at batch size 32 - we ...
Replace tokens instead of masking. def denoise_z15(): """Replace tokens instead of masking.""" hparams = xmoe2_dense_0() hparams.decoder_type = "denoising" hparams.noising_spec_train = {"type": "random_zipfian", "prob": 0.15} hparams.noising_use_eval_during_train = 0.25 return hparams
Denoising experiment. def denoise_v1_m15(): """Denoising experiment.""" hparams = xmoe2_v1() # no local attention # TODO(noam): non-masked version of local-attention hparams.decoder_layers = [ "att" if l == "local_att" else l for l in hparams.decoder_layers] hparams.decoder_type = "denoising" hpara...
Downloads and extracts the dataset. Args: tmp_dir: temp directory to download and extract the dataset data_dir: The base directory where data and vocab files are stored. Returns: tmp_dir: temp directory containing the raw data. def _download_mlu_data(tmp_dir, data_dir): """Downloads and extracts th...
Get a Counter with the ngrams of the given ID list. Args: ids: np.array or a list corresponding to a single sentence n: n-gram size Returns: collections.Counter with ID tuples as keys and 1s as values. def _get_ngram_counter(ids, n): """Get a Counter with the ngrams of the given ID list. Args: ...
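A minimal sketch of the n-gram Counter described above; per the docstring, every n-gram maps to a count of 1, which the intermediate set() enforces.

import collections

def get_ngram_counter(ids, n):
    ngrams = set(tuple(ids[i:i + n]) for i in range(len(ids) - n + 1))
    return collections.Counter(ngrams)  # each key gets count 1

print(get_ngram_counter([1, 2, 1, 2], 2))  # Counter({(1, 2): 1, (2, 1): 1})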
Compute Fbeta score. Args: true_positives: Number of true positive ngrams. selected: Number of selected ngrams. relevant: Number of relevant ngrams. beta: 0 gives precision only, 1 gives F1 score, and Inf gives recall only. Returns: Fbeta score. def _get_fbeta_score(true_positives, selected, ...
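A sketch of the F-beta computation from those counts; beta=0 reduces it to precision, beta=1 to F1, and large beta approaches recall.

def fbeta_score(true_positives, selected, relevant, beta=1.0):
    precision = true_positives / selected if selected else 0.0
    recall = true_positives / relevant if relevant else 0.0
    beta2 = beta * beta
    denom = beta2 * precision + recall
    if denom == 0.0:
        return 0.0
    return (1 + beta2) * precision * recall / denom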
Compute the addition score (Equation 4 in the paper). def get_addition_score(source_counts, prediction_counts, target_counts): """Compute the addition score (Equation 4 in the paper).""" added_to_prediction_counts = prediction_counts - source_counts true_positives = sum((added_to_prediction_counts & target_count...
Compute the keep score (Equation 5 in the paper). def get_keep_score(source_counts, prediction_counts, target_counts): """Compute the keep score (Equation 5 in the paper).""" source_and_prediction_counts = source_counts & prediction_counts source_and_target_counts = source_counts & target_counts true_positives...
Compute the deletion score (Equation 6 in the paper). def get_deletion_score(source_counts, prediction_counts, target_counts, beta=0): """Compute the deletion score (Equation 6 in the paper).""" source_not_prediction_counts = source_counts - prediction_counts source_not_target_counts = source_counts - target_cou...
Compute the SARI score for a single prediction and one or more targets. Args: source_ids: a list / np.array of SentencePiece IDs prediction_ids: a list / np.array of SentencePiece IDs list_of_targets: a list of target ID lists / np.arrays max_gram_size: int. largest n-gram size we care about (e.g. 3 ...
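A sketch of how the per-order sub-scores combine into SARI, reusing the helper names above; the handling of multiple targets (summing their counts) is an assumption here, not confirmed by the snippet.

import collections

def sari_score(source_ids, prediction_ids, list_of_targets, max_gram_size=4):
    add_total = keep_total = del_total = 0.0
    for n in range(1, max_gram_size + 1):
        source_counts = _get_ngram_counter(source_ids, n)
        prediction_counts = _get_ngram_counter(prediction_ids, n)
        target_counts = collections.Counter()
        for target in list_of_targets:
            target_counts += _get_ngram_counter(target, n)
        add_total += get_addition_score(source_counts, prediction_counts,
                                        target_counts)
        keep_total += get_keep_score(source_counts, prediction_counts,
                                     target_counts)
        del_total += get_deletion_score(source_counts, prediction_counts,
                                        target_counts)
    # SARI averages the three components over all n-gram orders.
    return (add_total + keep_total + del_total) / (3 * max_gram_size)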
Computes the SARI scores from the given source, prediction and targets. Args: source_ids: A 2D tf.Tensor of size (batch_size, sequence_length) prediction_ids: A 2D tf.Tensor of size (batch_size, sequence_length) target_ids: A 3D tf.Tensor of size (batch_size, number_of_targets, sequence_length) ...
Computes the SARI scores from the given source, prediction and targets. An approximate SARI scoring method since we do not glue word pieces or decode the ids and tokenize the output. By default, we use ngram order of 4. Also, this does not have beam search. Args: predictions: tensor, model predictions. ...
Download all MNIST files to directory unless they are there. def _get_mnist(directory): """Download all MNIST files to directory unless they are there.""" for filename in [ _MNIST_TRAIN_DATA_FILENAME, _MNIST_TRAIN_LABELS_FILENAME, _MNIST_TEST_DATA_FILENAME, _MNIST_TEST_LABELS_FILENAME ]: generato...
Extract images from an MNIST file into a numpy array. Args: filename: The path to an MNIST images file. num_images: The number of images in the file. Returns: A numpy array of shape [number_of_images, height, width, channels]. def _extract_mnist_images(filename, num_images): """Extract images from ...
Extract labels from an MNIST file into integers. Args: filename: The path to an MNIST labels file. num_labels: The number of labels in the file. Returns: An int64 numpy array of shape [num_labels] def _extract_mnist_labels(filename, num_labels): """Extract labels from an MNIST file into integers. ...
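A numpy sketch of both extractors against the raw gzipped IDX files, assuming the standard MNIST layout: a 16-byte header for images and an 8-byte header for labels.

import gzip
import numpy as np

def extract_mnist_images(filename, num_images, image_size=28):
    with gzip.open(filename, "rb") as f:
        f.read(16)  # skip magic number, image count, rows, cols
        buf = f.read(image_size * image_size * num_images)
    data = np.frombuffer(buf, dtype=np.uint8)
    return data.reshape(num_images, image_size, image_size, 1)

def extract_mnist_labels(filename, num_labels):
    with gzip.open(filename, "rb") as f:
        f.read(8)  # skip magic number and label count
        buf = f.read(num_labels)
    return np.frombuffer(buf, dtype=np.uint8).astype(np.int64)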
Image generator for MNIST. Args: tmp_dir: path to temporary storage directory. training: a Boolean; if true, we use the train set, otherwise the test set. how_many: how many images and labels to generate. data_filename: file that contains features data. label_filename: file that contains labels. ...
Image generator for MNIST. Args: tmp_dir: path to temporary storage directory. training: a Boolean; if true, we use the train set, otherwise the test set. how_many: how many images and labels to generate. start_from: from which image to start. Returns: An instance of image_generator that produ...
Download all FashionMNIST files to directory unless they are there. def _get_fashion_mnist(directory): """Download all FashionMNIST files to directory unless they are there.""" # Fashion mnist files have the same names as MNIST. # We must choose a separate name (by adding 'fashion-' prefix) in the tmp_dir. for...
Image generator for FashionMNIST. Args: tmp_dir: path to temporary storage directory. training: a Boolean; if true, we use the train set, otherwise the test set. how_many: how many images and labels to generate. start_from: from which image to start. Returns: An instance of image_generator tha...
Generates synthetic timeseries using input parameters. Each generated timeseries has timeseries_length data points. Parameters for each timeseries are specified by timeseries_params. Args: timeseries_length: Number of data points to generate for each timeseries. timeseries_params: Parameters used to gen...
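An illustrative generator of sinusoid-plus-noise series matching the description; the parameter dict keys are assumptions, not the function's real schema.

import numpy as np

def generate_timeseries(timeseries_length, timeseries_params):
    """Returns a [timeseries_length, num_series] float array."""
    t = np.arange(timeseries_length, dtype=np.float32)
    series = []
    for p in timeseries_params:
        clean = p["amplitude"] * np.sin(
            2 * np.pi * p["frequency"] * t + p["phase"])
        noise = np.random.normal(0.0, p["noise_std"], timeseries_length)
        series.append(clean + noise)
    return np.stack(series, axis=-1)

data = generate_timeseries(100, [
    {"amplitude": 1.0, "frequency": 0.05, "phase": 0.0, "noise_std": 0.1}])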
Basic 2-frame conv model with stochastic tower. def next_frame_basic_stochastic(): """Basic 2-frame conv model with stochastic tower.""" hparams = basic_deterministic_params.next_frame_basic_deterministic() hparams.stochastic_model = True hparams.add_hparam("latent_channels", 1) hparams.add_hparam("latent_st...
Basic 2-frame conv model with stochastic tower. def next_frame_sampling_stochastic(): """Basic 2-frame conv model with stochastic tower.""" hparams = basic_deterministic_params.next_frame_sampling() hparams.stochastic_model = True hparams.add_hparam("latent_channels", 1) hparams.add_hparam("latent_std_min", ...
Basic 2-frame conv model with stochastic discrete latent. def next_frame_basic_stochastic_discrete(): """Basic 2-frame conv model with stochastic discrete latent.""" hparams = basic_deterministic_params.next_frame_sampling() hparams.batch_size = 4 hparams.video_num_target_frames = 6 hparams.scheduled_samplin...
Next frame stochastic discrete tuning grid. def next_frame_stochastic_discrete_range(rhp): """Next frame stochastic discrete tuning grid.""" rhp.set_float("learning_rate_constant", 0.001, 0.01) rhp.set_float("dropout", 0.2, 0.6) rhp.set_int("filter_double_steps", 3, 5) rhp.set_discrete("hidden_size", [64, 96...
Map the function f to the nested structure x (dicts, tuples, lists). def nested_map(x, f): """Map the function f to the nested structure x (dicts, tuples, lists).""" if isinstance(x, list): return [nested_map(y, f) for y in x] if isinstance(x, tuple): return tuple([nested_map(y, f) for y in x]) if isin...
Get a structure of shapes for a structure of nested arrays. def shapes(x): """Get a structure of shapes for a structure of nested arrays.""" def shape(x): try: return x.shape except Exception: # pylint: disable=broad-except return [] return nested_map(x, shape)
Get a structure of sizes for a structure of nested arrays. def sizes(x): """Get a structure of sizes for a structure of nested arrays.""" def size(x): try: return x.size except Exception: # pylint: disable=broad-except return 0 return nested_map(x, size)
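A usage sketch for the three nested helpers above on a mixed structure; non-array leaves fall back to [] and 0, as the except branches specify.

import numpy as np

tree = {"obs": np.zeros((4, 84, 84)), "rewards": [np.ones(4), 7]}
print(shapes(tree))  # {'obs': (4, 84, 84), 'rewards': [(4,), []]}
print(sizes(tree))   # {'obs': 28224, 'rewards': [4, 0]}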
Find the frame with the caller on the stack. def _find_frame(stack, start=0): """Find the frame with the caller on the stack.""" # We want to find the first place where the layer was called # that is *not* an __init__ function of an inheriting layer. frame = inspect.getframeinfo(stack[start][0]) # If we are ...
Shorten file path in error lines for more readable tracebacks. def _shorten_file_path(line): """Shorten file path in error lines for more readable tracebacks.""" start = line.lower().find('file') if start < 0: return line first_quote = line.find('"', start) if first_quote < 0: return line second_qu...
Cleaned-up form of traceback. def _short_traceback(skip=3): """Cleaned-up form of traceback.""" counter, res = 0, [] # Skipping 3 lines by default: the top (useless) and self-call. lines = traceback.format_exc().splitlines()[skip:] for l in lines: res.append(_shorten_file_path(l)) if counter % 2 == 1...
Create a layer class from a function. def layer(output_shape=None, new_parameters=None): """Create a layer class from a function.""" def layer_decorator(call): """Decorating the call function.""" def output_shape_fun(self, input_shape): if output_shape is None: return input_shape kwargs...
Initialize the layer given an input shape and rng. Returns new_parameters(input_shape, rng) on the first call and () on any subsequent call, as the layer is already initialized. This is used for networks that share parameters, so the layer only produces them once. Note that all arguments and return va...
Returns dict<str ref_url, str ref_content>. def _references_content(ref_files): """Returns dict<str ref_url, str ref_content>.""" example_spec = { "url": tf.FixedLenFeature([], tf.string), "content": tf.FixedLenFeature([], tf.string), } data = {} for ex in generator_utils.tfrecord_iterator( ...
Urls for chunk: dict<str wiki_url, list<str> ref_urls>. def _wiki_urls_for_shard(shard_id, urls_dir=None): """Urls for chunk: dict<str wiki_url, list<str> ref_urls>.""" urls_dir = urls_dir or WIKI_URLS_DIR urls_filepath = os.path.join(urls_dir, WIKI_URLS_FILE % shard_id) with tf.gfile.GFile(urls_filepath) as f...
Generates WikipediaArticles from GCS that are part of shard shard_id. def _wiki_articles(shard_id, wikis_dir=None): """Generates WikipediaArticles from GCS that are part of shard shard_id.""" if not wikis_dir: wikis_dir = WIKI_CONTENT_DIR with tf.Graph().as_default(): dataset = tf.data.TFRecordDataset( ...
Rank and return reference paragraphs by tf-idf score on title tokens. def rank_reference_paragraphs(wiki_title, references_content, normalize=True): """Rank and return reference paragraphs by tf-idf score on title tokens.""" normalized_title = _normalize_text(wiki_title) title_tokens = _tokens_to_score( se...
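A plain-Python sketch of the ranking idea: score each reference paragraph by the tf-idf weight of the title tokens it contains. Tokenization and idf smoothing here are illustrative choices, not the module's.

import collections
import math

def rank_paragraphs_by_tfidf(title_tokens, paragraphs):
    tokenized = [p.lower().split() for p in paragraphs]
    doc_freq = collections.Counter()
    for tokens in tokenized:
        doc_freq.update(set(tokens))  # document frequency per token
    n_docs = len(paragraphs)
    scored = []
    for para, tokens in zip(paragraphs, tokenized):
        counts = collections.Counter(tokens)
        score = sum(counts[tok] * math.log(n_docs / (1.0 + doc_freq[tok]))
                    for tok in title_tokens)
        scored.append((score, para))
    return [para for _, para in sorted(scored, reverse=True)]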
Produce examples from shard_ids to out_filepaths. def produce_examples(shard_ids, wikis_dir, refs_dir, urls_dir, vocab_path, out_filepaths): """Produce examples from shard_ids to out_filepaths.""" # * Join the Wikipedia articles with their references # * Run Tf-idf to sort reference paragrap...
Encodes sections with vocab. Returns ids and section boundaries. def _encode_wiki_sections(sections, vocab): """Encodes sections with vocab. Returns ids and section boundaries.""" ids = [] section_boundaries = [] for i, section in enumerate(sections): if i > 0: # Skip including article title id...
Extract references from WET files into sharded output files. def extract_references_from_wets(wet_files, metadata_dir, out_dir, tmp_dir=None): """Extract references from WET files into sharded output files.""" # Setup output files shard_files = make_ref_shard_files(out_dir) nu...
Extract pages from an xml dump. Args: dump: a unicode string Returns: a list of unicode strings def _dump_to_pages(dump): """Extract pages from an xml dump. Args: dump: a unicode string Returns: a list of unicode strings """ pos = 0 ret = [] start_tag = u"<page>\n" end_tag = u"</p...