| instruction (string, 100 classes) | code (string, 78–193k chars) | response (string, 259–170k chars) | file (string, 59–203 chars) |
|---|---|---|---|
Write docstrings describing each step | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import OrderedDict
import numpy as np
import torch
from fairseq.data import FairseqDataset, MonolingualDataset, data_utils
... | --- +++ @@ -12,6 +12,40 @@
class SpeechDLMDataset(FairseqDataset):
+ """The dataset used to train the SpeechDLM model as described in the paper:
+ https://arxiv.org/pdf/2203.16502.pdf
+
+ The input datasets is expected to be a dict over channel names with the values
+ being instances of :class:`~fairseq... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/data/speech_dlm_dataset.py |
Write clean docstrings for readability | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import hashlib
import json
import subprocess
import tempfile
from typing import Hashable
try:
import pyarrow.plasma as plasma
PYARR... | --- +++ @@ -20,6 +20,12 @@
class PlasmaArray:
+ """
+ Wrapper around numpy arrays that automatically moves the data to shared
+ memory upon serialization. This is particularly helpful when passing numpy
+ arrays through multiprocessing, so that data is not unnecessarily
+ duplicated or pickled.
+ ... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/data/plasma_utils.py |
Add detailed documentation for each class | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
from fairseq.data import data_utils
class WordNoising(object):
def __init__(self, dictionary, bpe_cont_... | --- +++ @@ -9,6 +9,7 @@
class WordNoising(object):
+ """Generate a noisy version of a sentence, without changing words themselves."""
def __init__(self, dictionary, bpe_cont_marker="@@", bpe_end_marker=None):
self.dictionary = dictionary
@@ -36,6 +37,12 @@ raise NotImplementedError()
... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/data/noising.py |
Generate missing documentation strings | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from collections import OrderedDict
from typing import Dict, Sequence
import numpy as np
from . import FairseqDataset, Langua... | --- +++ @@ -15,6 +15,17 @@
class RoundRobinZipDatasets(FairseqDataset):
+ """Zip multiple :class:`~fairseq.data.FairseqDataset` instances together.
+
+ Shorter datasets are repeated in a round-robin fashion to match the length
+ of the longest one.
+
+ Args:
+ datasets (Dict[~fairseq.data.Fairseq... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/data/round_robin_zip_datasets.py |
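For context, a minimal sketch of the round-robin pairing this docstring describes, with plain Python lists standing in for the FairseqDataset instances (the function name and shapes here are illustrative, not fairseq's API):

```python
from collections import OrderedDict

def round_robin_zip(datasets: "OrderedDict[str, list]"):
    """Yield one item per dataset; shorter datasets repeat round-robin."""
    longest = max(len(d) for d in datasets.values())
    for i in range(longest):
        yield {name: d[i % len(d)] for name, d in datasets.items()}

pairs = list(round_robin_zip(OrderedDict(en=[1, 2, 3], fr=["a", "b"])))
# -> [{'en': 1, 'fr': 'a'}, {'en': 2, 'fr': 'b'}, {'en': 3, 'fr': 'a'}]
```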
Create docstrings for API functions | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
import logging
import numpy as np
from fairseq.data.data_utils import numpy_seed
from . import BaseWrapperDataset
logger... | --- +++ @@ -16,6 +16,12 @@
class SubsampleDataset(BaseWrapperDataset):
+ """Subsamples a given dataset by a specified ratio. Subsampling is done on the number of examples
+
+ Args:
+ dataset (~torch.utils.data.Dataset): dataset to subsample
+ size_ratio(float): the ratio to subsample to. must be... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/data/subsample_dataset.py |
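The ratio-based subsampling above can be sketched in a few lines of numpy; this shows the idea only, not fairseq's SubsampleDataset internals (the seeded RNG is an assumption):

```python
import numpy as np

def subsample_indices(n: int, size_ratio: float, seed: int = 0) -> np.ndarray:
    """Pick floor(n * size_ratio) example indices without replacement."""
    assert 0.0 < size_ratio < 1.0, "ratio must be in (0, 1)"
    rng = np.random.default_rng(seed)
    return rng.choice(n, size=int(n * size_ratio), replace=False)

idx = subsample_indices(1000, size_ratio=0.25)  # 250 of 1000 examples
```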
Add docstrings that explain logic | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import OrderedDict
import torch
from torch.utils.data.dataloader import default_collate
from . import FairseqDataset
def ... | --- +++ @@ -12,6 +12,7 @@
def _flatten(dico, prefix=None):
+ """Flatten a nested dictionary."""
new_dico = OrderedDict()
if isinstance(dico, dict):
prefix = prefix + "." if prefix is not None else ""
@@ -28,6 +29,7 @@
def _unflatten(dico):
+ """Unflatten a flattened dictionary into a ne... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/data/nested_dictionary_dataset.py |
Write docstrings for utility functions | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
from typing import Optional
import torch
from fairseq.dataclass.configs import DistributedTrainingConfig
from fairseq.distr... | --- +++ @@ -21,6 +21,18 @@
class FullyShardedDataParallel(FSDP):
+ """
+ A small wrapper around fairscale's FullyShardedDataParallel (FSDP) with some
+ fairseq-specific checkpoint saving/loading logic.
+
+ Args:
+ use_sharded_state (bool): if True, then ``state_dict`` will return
+ ``F... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/distributed/fully_sharded_data_parallel.py |
Write docstrings for this repository | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import OrderedDict
from contextlib import contextmanager
import torch
from torch import nn
from fairseq.distributed import... | --- +++ @@ -3,6 +3,16 @@ # This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
+"""
+A modified version of the legacy DistributedDataParallel module that uses c10d
+communication primitives. This version is simpler than the latest PyTorch
+version... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/distributed/legacy_distributed_data_parallel.py |
Add documentation for all methods | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
from fairseq.data import FairseqDataset, plasma_utils
from fairseq.data.indexed_dataset import best_fitting_in... | --- +++ @@ -11,6 +11,27 @@
class TokenBlockDataset(FairseqDataset):
+ """Break a Dataset of tokens into blocks.
+
+ Args:
+ dataset (~torch.utils.data.Dataset): dataset to break into blocks
+ sizes (List[int]): sentence lengths (required for 'complete' and 'eos')
+ block_size (int): maxim... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/data/token_block_dataset.py |
Add structured docstrings to improve clarity | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import signal
import threading
from torch import nn
logger = logging.getLogger(__name__)
class DistributedTimeou... | --- +++ @@ -15,6 +15,24 @@
class DistributedTimeoutWrapper(nn.Module):
+ """
+ A wrapper that kills the process if no progress is made within a given
+ *timeout*. The timer is reset every time :func:`forward` is called.
+
+ Usage::
+
+ module = DistributedTimeoutWrapper(module, timeout=30)
+ ... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/distributed/distributed_timeout_wrapper.py |
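The watchdog behavior described here can be sketched with a re-armed threading.Timer; this is a simplified stand-in for the real nn.Module wrapper, and sending SIGINT to our own process is an assumption (fairseq's choice of signal may differ):

```python
import os
import signal
import threading

class TimeoutWrapper:
    """Call `fn` normally, but bail out if no call happens for `timeout` s."""

    def __init__(self, fn, timeout: float):
        self.fn, self.timeout = fn, timeout
        self._timer = None

    def _arm(self):
        if self._timer is not None:
            self._timer.cancel()
        self._timer = threading.Timer(
            self.timeout, os.kill, args=(os.getpid(), signal.SIGINT)
        )
        self._timer.daemon = True
        self._timer.start()

    def __call__(self, *args, **kwargs):
        self._arm()  # reset the countdown on every forward-like call
        return self.fn(*args, **kwargs)
```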
Fully document this Python code with docstrings | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.model_parallel.model... | --- +++ @@ -2,6 +2,9 @@ #
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
+"""
+RoBERTa: A Robustly Optimized BERT Pretraining Approach.
+"""
import logging
@@ -56,6 +59,7 @@
@classmethod
def build_model(cls, args, task):
+ ... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/model_parallel/models/roberta/model.py |
Write docstrings including parameters and return values | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import ast
import inspect
import logging
import os
import re
from argparse import ArgumentError, ArgumentParser, Namespace
from dataclasses im... | --- +++ @@ -56,6 +56,12 @@ delete_default: bool = False,
with_prefix: Optional[str] = None,
) -> None:
+ """
+ Convert a dataclass instance to trailing parser arguments.
+
+ If `with_prefix` is provided, prefix all the keys in the resulting parser with it. It means that we are
+ building a flat nam... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/dataclass/utils.py |
Write docstrings that follow conventions | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import datetime
import hashlib
import logging
import time
from bisect import bisect_right
from collections import OrderedDict, defaultdict
fro... | --- +++ @@ -48,6 +48,29 @@
class SampledMultiDataset(FairseqDataset):
+ """Samples from multiple sub-datasets according to given sampling ratios.
+ Args:
+ datasets (
+ List[~torch.utils.data.Dataset]
+ or OrderedDict[str, ~torch.utils.data.Dataset]
+ ): datasets
+ s... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/data/multilingual/sampled_multi_dataset.py |
Document my Python code with docstrings | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.model_parallel.model... | --- +++ @@ -170,6 +170,7 @@
@staticmethod
def add_args(parser):
+ """Add model-specific arguments to the parser."""
# fmt: off
parser.add_argument('--activation-fn',
choices=utils.get_available_activation_fns(),
@@ -229,6 +230,7 @@
@classmethod
... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/model_parallel/models/pipeline_parallel_transformer/model.py |
Document this module using docstrings | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from collections import namedtuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import option... | --- +++ @@ -30,6 +30,7 @@
class TransformerEncoderEmbedding(nn.Module):
+ """Encoder Embedding + Positional Embedding"""
def __init__(self, args, embed_tokens):
super().__init__()
@@ -85,6 +86,10 @@
class TransformerEncoderLayerNorm(nn.Module):
+ """
+ Layer norm at the end of all e... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/model_parallel/models/pipeline_parallel_transformer/layers.py |
Write Python docstrings for this snippet | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import typing as tp
def _safe_readline(fd) -> str:
pos = fd.tell()
while True:
try:
return fd.readline... | --- +++ @@ -18,6 +18,10 @@
def find_offsets(filename: str, num_chunks: int) -> tp.List[int]:
+ """
+ Given a file and a number of chunks, find the offsets in the file
+ to be able to chunk around full lines.
+ """
with open(filename, "r", encoding="utf-8") as f:
size = os.fstat(f.fileno()... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/file_chunker_utils.py |
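The offset-finding logic reads naturally as: seek to an even byte boundary, then advance past the partial line. A sketch under one simplifying assumption (binary mode, which sidesteps the UTF-8 mid-character handling that `_safe_readline` does in the real code):

```python
import os

def line_aligned_offsets(path: str, num_chunks: int):
    """Byte offsets splitting `path` into num_chunks pieces on line ends."""
    size = os.path.getsize(path)
    offsets = [0]
    with open(path, "rb") as f:  # binary mode: byte-exact seek/tell
        for i in range(1, num_chunks):
            f.seek(size * i // num_chunks)
            f.readline()  # advance past the partial line we landed in
            offsets.append(f.tell())
    offsets.append(size)
    return offsets  # chunk i covers bytes [offsets[i], offsets[i + 1])
```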
Provide docstrings following PEP 257 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from torch import nn
class ModuleProxyWrapper(nn.Module):
def __init__(self, module: nn.Module):
super().__init__()
ass... | --- +++ @@ -7,6 +7,22 @@
class ModuleProxyWrapper(nn.Module):
+ """
+ Wrap a DistributedDataParallel module and forward requests for missing
+ attributes to the module wrapped by DDP (the twice-wrapped module).
+ Also forward calls to :func:`state_dict` and :func:`load_state_dict`.
+
+ Usage::
+
+ ... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/distributed/module_proxy_wrapper.py |
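The attribute-forwarding pattern behind this wrapper is short enough to sketch; this follows the docstring's description (missing attributes fall through to the DDP wrapper, then to the twice-wrapped module), not necessarily fairseq's exact implementation:

```python
from torch import nn

class ProxyWrapper(nn.Module):
    def __init__(self, module: nn.Module):
        super().__init__()
        assert hasattr(module, "module"), "expected a DDP-style wrapper"
        self.module = module

    def __getattr__(self, name):
        try:  # nn.Module's own lookup (parameters, buffers, submodules)
            return super().__getattr__(name)
        except AttributeError:
            try:  # the DDP wrapper itself
                return getattr(self.module, name)
            except AttributeError:  # the twice-wrapped module
                return getattr(self.module.module, name)

    def state_dict(self, *args, **kwargs):
        return self.module.module.state_dict(*args, **kwargs)

    def load_state_dict(self, *args, **kwargs):
        return self.module.module.load_state_dict(*args, **kwargs)
```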
Document functions with detailed explanations | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import OrderedDict
from typing import Callable, Dict, List
import numpy as np
from . import FairseqDataset
def uniform_sa... | --- +++ @@ -17,6 +17,16 @@
class MultiCorpusSampledDataset(FairseqDataset):
+ """
+ Stores multiple instances of FairseqDataset together and in every iteration
+ creates a batch by first sampling a dataset according to a specified
+ probability distribution and then getting instances from that dataset.
... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/data/multi_corpus_sampled_dataset.py |
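The two-stage sampling in this docstring (first pick a dataset, then pick items from it), sketched with numpy; uniform dataset choice is an assumed default, and the real class accepts an arbitrary sampler:

```python
import numpy as np

def sample_batch(datasets: dict, batch_size: int, seed: int = 0):
    rng = np.random.default_rng(seed)
    names = list(datasets)
    chosen = names[rng.integers(len(names))]   # 1) sample a dataset
    data = datasets[chosen]
    idx = rng.integers(len(data), size=batch_size)
    return chosen, [data[i] for i in idx]      # 2) sample items from it

corpus, batch = sample_batch(
    {"en-fr": list(range(10)), "en-de": list(range(5))}, batch_size=4
)
```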
Generate docstrings for script automation | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import shutil
from typing import List, Optional
logger = logging.getLogger(__file__)
try:... | --- +++ @@ -34,6 +34,11 @@
class PathManager:
+ """
+ Wrapper for insulating OSS I/O (using Python builtin operations) from
+ iopath's PathManager abstraction (for transparently handling various
+ internal backends).
+ """
@staticmethod
def open(
@@ -128,6 +133,7 @@
@staticmethod
... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/file_io.py |
Write docstrings describing functionality | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import torch.nn as nn
from fairseq.model_parallel.modules import (
ModelParallelTransformerDecoderLayer,
ModelParalle... | --- +++ @@ -35,6 +35,9 @@
@register_model("model_parallel_transformer")
class ModelParallelTransformerModel(TransformerModel):
+ """
+ Model parallel Transformer model.
+ """
@classmethod
def build_embedding(cls, args, dictionary, embed_dim, path=None):
@@ -77,6 +80,10 @@
class ModelParallel... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/model_parallel/models/transformer.py |
Write clean docstrings for readability | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
from fairseq.data import data_utils
from . import BaseWrapperDataset
class TruncateDataset(BaseWrapperDataset):
def... | --- +++ @@ -10,6 +10,7 @@
class TruncateDataset(BaseWrapperDataset):
+ """Truncate a sequence by returning the first truncation_length tokens"""
def __init__(self, dataset, truncation_length):
super().__init__(dataset)
@@ -33,6 +34,7 @@
class RandomCropDataset(TruncateDataset):
+ """Trunca... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/data/shorten_dataset.py |
Replace inline comments with docstrings | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import bisect
import time
from collections import OrderedDict
from typing import Dict, Optional
try:
import torch
def type_as(a, b):... | --- +++ @@ -31,6 +31,7 @@
class Meter(object):
+ """Base class for Meters."""
def __init__(self):
pass
@@ -46,6 +47,7 @@
@property
def smoothed_value(self) -> float:
+ """Smoothed value used for logging."""
raise NotImplementedError
@@ -61,6 +63,7 @@
class Avera... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/logging/meters.py |
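A minimal AverageMeter in the spirit of the Meter base class above; `smoothed_value` is the quantity that ends up in the logs. This is the common pattern, not fairseq's exact class:

```python
class AverageMeter:
    """Running average over a stream of values."""

    def __init__(self):
        self.reset()

    def reset(self):
        self.sum, self.count = 0.0, 0

    def update(self, val: float, n: int = 1):
        self.sum += val * n
        self.count += n

    @property
    def smoothed_value(self) -> float:
        return self.sum / self.count if self.count else 0.0

m = AverageMeter()
for loss in (2.0, 4.0):
    m.update(loss)
assert m.smoothed_value == 3.0
```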
Add docstrings to clarify complex logic | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import io
import logging
import os
import pickle
import random
import socket
import struct
import subprocess
import warnings
from argparse imp... | --- +++ @@ -483,6 +483,7 @@
def get_data_parallel_group():
+ """Get the data parallel group the caller rank belongs to."""
global _USE_MEGATRON
if _USE_MEGATRON:
from fairseq.model_parallel.megatron import mpu
@@ -493,10 +494,12 @@
def get_data_parallel_rank():
+ """Return my rank for t... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/distributed/utils.py |
Add docstrings following best practices | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict, List, Optional, Tuple
import torch.nn as nn
from fairseq import utils
from torch import Tensor
class FairseqDecode... | --- +++ @@ -11,6 +11,7 @@
class FairseqDecoder(nn.Module):
+ """Base class for decoders."""
def __init__(self, dictionary):
super().__init__()
@@ -19,6 +20,18 @@ self.adaptive_softmax = None
def forward(self, prev_output_tokens, encoder_out=None, **kwargs):
+ """
+ Ar... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/models/fairseq_decoder.py |
Generate docstrings for script automation | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import atexit
import json
import logging
import os
import sys
from collections import OrderedDict
from contextlib import contextmanager
from ... | --- +++ @@ -3,6 +3,9 @@ # This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
+"""
+Wrapper around various loggers and progress bars (e.g., tqdm).
+"""
import atexit
import json
@@ -93,6 +96,7 @@ default: str = "tqdm",
no_progress_bar... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/logging/progress_bar.py |
Replace inline comments with docstrings | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from argparse import Namespace
from typing import Dict, List, Optional, Tuple
import torch
import torch.nn as nn
import torch.... | --- +++ @@ -2,6 +2,9 @@ #
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
+"""
+Base classes for various fairseq models.
+"""
import logging
from argparse import Namespace
@@ -34,6 +37,7 @@
class BaseFairseqModel(nn.Module):
+ """B... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/models/fairseq_model.py |
Write proper docstrings for these functions | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict, List, NamedTuple, Optional
import torch
import torch.nn as nn
from torch import Tensor
EncoderOut = NamedTuple(
... | --- +++ @@ -24,15 +24,28 @@
class FairseqEncoder(nn.Module):
+ """Base class for encoders."""
def __init__(self, dictionary):
super().__init__()
self.dictionary = dictionary
def forward(self, src_tokens, src_lengths=None, **kwargs):
+ """
+ Args:
+ src_tok... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/models/fairseq_encoder.py |
Add clean documentation to messy code | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.models import (
Fai... | --- +++ @@ -28,6 +28,22 @@
@register_model("fconv")
class FConvModel(FairseqEncoderDecoderModel):
+ """
+ A fully convolutional model, i.e. a convolutional encoder and a
+ convolutional decoder, as described in `"Convolutional Sequence to Sequence
+ Learning" (Gehring et al., 2017) <https://arxiv.org/abs... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/models/fconv.py |
Generate consistent docstrings | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import namedtuple
import numpy as np
import torch
from fairseq import utils
DecoderOut = namedtuple(
"IterativeRefinem... | --- +++ @@ -31,6 +31,18 @@ retain_history=False,
reranking=False,
):
+ """
+ Generates translations based on iterative refinement.
+
+ Args:
+ tgt_dict: target dictionary
+ eos_penalty: if > 0.0, it penalizes early-stopping in decoding
+ max_it... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/iterative_refinement_generator.py |
Add docstrings including usage examples | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict, Optional, Tuple
import torch
import torch.nn.functional as F
from torch import Tensor, nn
from fairseq import utils... | --- +++ @@ -28,6 +28,11 @@
@with_incremental_state
class ModelParallelMultiheadAttention(nn.Module):
+ """Model parallel Multi-headed attention.
+ This performs the Multi-headed attention over multiple gpus.
+
+ See "Megatron-LM: https://arxiv.org/pdf/1909.08053.pdf" for more details.
+ """
def __... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/model_parallel/modules/multihead_attention.py |
Write docstrings for this repository | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import Optional
import torch
import torch.nn as nn
from fairseq import utils
from fairseq.models import register_m... | --- +++ @@ -2,6 +2,10 @@ #
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
+"""
+BART: Denoising Sequence-to-Sequence Pre-training for
+Natural Language Generation, Translation, and Comprehension
+"""
import logging
from typing import Optio... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/models/bart/model.py |
Write docstrings for data processing functions | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .fairseq_encoder import FairseqEncoder
class CompositeEncoder(FairseqEncoder):
def __init__(self, encoders):
super().__ini... | --- +++ @@ -7,6 +7,15 @@
class CompositeEncoder(FairseqEncoder):
+ """
+ A wrapper around a dictionary of :class:`FairseqEncoder` objects.
+
+ We run forward on each encoder and return a dictionary of outputs. The first
+ encoder's dictionary is used for initialization.
+
+ Args:
+ encoders (d... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/models/composite_encoder.py |
Generate documentation strings for clarity | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
import copy
import logging
import math
from argparse import Namespace
from dataclasses import dataclass, field
from typing i... | --- +++ @@ -159,10 +159,12 @@
@classmethod
def build_model(cls, cfg: HubertCtcConfig, task: FairseqTask):
+ """Build a new model instance."""
w2v_encoder = HubertEncoder(cfg, task)
return cls(cfg, w2v_encoder)
def get_normalized_probs(self, net_output, log_probs):
+ """... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/models/hubert/hubert_asr.py |
Add docstrings to meet PEP guidelines | #!/usr/bin/env python3
import copy
import logging
import torch
from fairseq import checkpoint_utils
class EMA(object):
def __init__(self, model, config, device=None, skip_keys=None):
self.decay = config.ema_decay
self.model = copy.deepcopy(model)
self.model.requires_grad_(False)
... | --- +++ @@ -1,5 +1,25 @@ #!/usr/bin/env python3
+"""
+This module has the EMA class used to store a copy of the exponentially decayed
+model params.
+
+Typical usage of EMA class involves initializing an object using an existing
+model (random or from a seed model) and setting the config like ema_decay,
+ema_start_up... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/models/ema/ema.py |
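The core update this module docstring describes is a per-parameter exponential decay; a sketch on raw tensors (the real class also manages config such as ema_decay, FP32 shadow copies, and checkpoint loading):

```python
import copy
import torch

@torch.no_grad()
def ema_step(ema_model, model, decay: float = 0.999):
    """new_ema = decay * ema + (1 - decay) * current, per parameter."""
    for p_ema, p in zip(ema_model.parameters(), model.parameters()):
        p_ema.mul_(decay).add_(p, alpha=1.0 - decay)

model = torch.nn.Linear(4, 2)
ema_model = copy.deepcopy(model).requires_grad_(False)
# ... after every optimizer step:
ema_step(ema_model, model, decay=0.999)
```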
Document all endpoints with docstrings | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import logging
from typing import Dict, List
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as ... | --- +++ @@ -21,6 +21,10 @@
class BARTHubInterface(GeneratorHubInterface):
+ """A simple PyTorch Hub interface to BART.
+
+ Usage: https://github.com/pytorch/fairseq/tree/main/examples/bart
+ """
def __init__(self, cfg, task, model):
super().__init__(cfg, task, [model])
@@ -29,6 +33,25 @@ ... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/models/bart/hub_interface.py |
Add docstrings including usage examples | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from fairseq.model_parallel.modules import ModelParallelMultiheadAttention
from fairseq.modules import TransformerDecoderLayer, TransformerEnc... | --- +++ @@ -19,6 +19,10 @@
class ModelParallelTransformerEncoderLayer(TransformerEncoderLayer):
+ """Encoder layer block over multiple gpus.
+
+ See "Megatron-LM: https://arxiv.org/pdf/1909.08053.pdf" for more details.
+ """
def build_fc1(self, input_dim, output_dim, q_noise, qn_block_size):
... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/model_parallel/modules/transformer_layer.py |
Write docstrings for algorithm functions | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
import uuid
from collections import defaultdict
from typing import Callable, List, Optional
from .meters import *
# Aggre... | --- +++ @@ -2,6 +2,14 @@ #
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
+"""
+A standalone module for aggregating metrics.
+
+Metrics can be logged from anywhere using the `log_*` functions defined
+in this module. The logged values will b... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/logging/metrics.py |
Write docstrings describing each step | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import fnmatch
import json
import logging
import os
import shutil
import tarfile
import tempfile
from functools import partial, wraps
from ha... | --- +++ @@ -3,6 +3,11 @@ # This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
+"""
+Utilities for working with the local dataset cache.
+This file is adapted from `AllenNLP <https://github.com/allenai/allennlp>`_.
+and `huggingface <https://githu... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/file_utils.py |
Document helper functions with docstrings | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import Any, Dict, List, Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from fair... | --- +++ @@ -33,6 +33,23 @@
@register_model("lightconv")
class LightConvModel(FairseqEncoderDecoderModel):
+ """
+ LightConv and DynamicConv model from `"Pay Less Attention with Lightweight and Dynamic Convolutions" (Wu, et al, 2019)
+ <https://openreview.net/pdf?id=SkVhlh09tX>`_.
+ To use LightConv pleas... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/models/lightconv.py |
Add inline docstrings for readability | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import math
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import checkpoint_utils
... | --- +++ @@ -70,6 +70,7 @@
@staticmethod
def add_args(parser):
+ """Add model-specific arguments to the parser."""
# fmt: off
parser.add_argument('--dropout', type=float, metavar='D',
help='dropout probability')
@@ -109,6 +110,7 @@
@classmethod
... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/models/fconv_self_att.py |
Write beginner-friendly docstrings | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import uuid
from typing import Dict, Optional
from torch import Tensor
class FairseqIncrementalState(object):
def __init__(self, *args,... | --- +++ @@ -25,6 +25,7 @@ incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]],
key: str,
) -> Optional[Dict[str, Optional[Tensor]]]:
+ """Helper for getting incremental state for an nn.Module."""
full_key = self._get_full_incremental_state_key(key)
if incre... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/incremental_decoding_utils.py |
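The helper's job is to namespace state keys per module instance, so two decoders sharing one incremental_state dict cannot collide; a sketch of that keying scheme (the UUID-per-instance trick mirrors the idea, not fairseq's exact key format):

```python
import uuid
from typing import Any, Dict, Optional

class IncrementalStateMixin:
    def __init__(self):
        self._uid = str(uuid.uuid4())  # unique per module instance

    def _full_key(self, key: str) -> str:
        return f"{type(self).__name__}.{self._uid}.{key}"

    def get_incremental_state(
        self, state: Optional[Dict[str, Any]], key: str
    ) -> Optional[Any]:
        return None if state is None else state.get(self._full_key(key))

    def set_incremental_state(
        self, state: Optional[Dict[str, Any]], key: str, value: Any
    ) -> None:
        if state is not None:
            state[self._full_key(key)] = value
```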
Improve my code by adding docstrings | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.models import (
... | --- +++ @@ -2,6 +2,9 @@ #
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
+"""
+RoBERTa: A Robustly Optimized BERT Pretraining Approach.
+"""
import logging
@@ -49,6 +52,7 @@
@staticmethod
def add_args(parser):
+ """Add... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/models/roberta/model.py |
Write clean docstrings for readability | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import importlib
import os
from contextlib import ExitStack
from fairseq.dataclass import FairseqDataclass
from fairseq.data... | --- +++ @@ -2,6 +2,7 @@ #
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
+"""isort:skip_file"""
import argparse
import importlib
@@ -106,6 +107,24 @@
def register_model(name, dataclass=None):
+ """
+ New model types can be adde... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/models/__init__.py |
Add docstrings to existing functions | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from fairseq.dataclass.configs import FairseqConfig
from fairseq.distributed import utils as distributed_utils
from fairseq.trainer import Tr... | --- +++ @@ -3,6 +3,9 @@ # This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
+"""
+Train a network across multiple GPUs.
+"""
from fairseq.dataclass.configs import FairseqConfig
from fairseq.distributed import utils as distributed_utils
@@ -2... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/model_parallel/megatron_trainer.py |
Add docstrings to clarify complex logic | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.data import encod... | --- +++ @@ -12,6 +12,10 @@
class RobertaHubInterface(nn.Module):
+ """A simple PyTorch Hub interface to RoBERTa.
+
+ Usage: https://github.com/pytorch/fairseq/tree/main/examples/roberta
+ """
def __init__(self, cfg, task, model):
super().__init__()
@@ -31,6 +35,26 @@ def encode(
... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/models/roberta/hub_interface.py |
Add docstrings to make code maintainable | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from fairseq import utils
from fairseq.logging import metrics
from fairseq.criterions import FairseqCriterion, register_criterion... | --- +++ @@ -33,6 +33,13 @@ )
def forward(self, model, sample, reduce=True):
+ """Compute the loss for the given sample.
+
+ Returns a tuple with three elements:
+ 1) the loss
+ 2) the sample size, which is used as the denominator for the gradient
+ 3) logging output... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/model_parallel/criterions/vocab_parallel_cross_entropy.py |
Add docstrings for better understanding | #!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import copy
import logging
import os
from typing import Any, Dict, Iterator, List
import torch
from... | --- +++ @@ -95,6 +95,10 @@
class GeneratorHubInterface(nn.Module):
+ """
+ PyTorch Hub interface for generating sequences from a pre-trained
+ translation or language model.
+ """
def __init__(self, cfg, task, models):
super().__init__()
@@ -291,6 +295,7 @@
class BPEHubInterface(obje... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/hub_utils.py |
Document this script properly | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict, List, Tuple, Optional
import torch
import torch.nn as nn
from fairseq import utils
from fairseq.modules import Layer... | --- +++ @@ -15,6 +15,19 @@
class CrossChannelTransformerDecoderLayer(nn.Module):
+ """Cross-Attention Transformer Decoder Layer block as described
+ in the paper: https://arxiv.org/pdf/2203.16502.pdf
+
+ Composed of a Multi-head Self Attention block followed by a
+ Multi-head Cross-Attention block which... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/models/speech_dlm/modules/speech_dlm_decoder_layer.py |
Add return value explanations in docstrings | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import Dict, List, Optional
from omegaconf.listconfig import ListConfig
from omegaconf.dictconfig import DictConfig
... | --- +++ @@ -40,6 +40,31 @@ lm_weight=1.0,
duration_temperature=1.0,
):
+ """Generate multi-channel parallel units with the SpeechDLM model
+ as described in the paper: https://arxiv.org/pdf/2203.16502.pdf;
+
+ Args:
+ models (List[~fairseq.models.FairseqModel]): ens... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/models/speech_dlm/sequence_generator/multichannel_sequence_generator.py |
Add docstrings that explain purpose and usage | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import Dict, Optional
from fairseq.incremental_decoding_utils import with_incremental_state
from fairseq.models im... | --- +++ @@ -16,6 +16,26 @@
@with_incremental_state
class FairseqIncrementalDecoder(FairseqDecoder):
+ """Base class for incremental decoders.
+
+ Incremental decoding is a special mode at inference time where the Model
+ only receives a single timestep of input corresponding to the previous
+ output toke... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/models/fairseq_incremental_decoder.py |
Add return value explanations in docstrings | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict, List, Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
f... | --- +++ @@ -31,6 +31,7 @@
@staticmethod
def add_args(parser):
+ """Add model-specific arguments to the parser."""
# fmt: off
parser.add_argument('--dropout', type=float, metavar='D',
help='dropout probability')
@@ -83,6 +84,7 @@
@classmethod
de... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/models/lstm.py |
Document this code for team use | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict, Optional
import torch
import torch.nn as nn
from torch import Tensor
class MultichannelSearch(nn.Module):
def ... | --- +++ @@ -29,6 +29,31 @@ def step(
self, step, lprobs, scores, prev_output_tokens=None, original_batch_idxs=None
):
+ """Take a single search step.
+
+ Args:
+ step: the current search step, starting at 0
+ lprobs: dictionary of channels {channel : (bsz x input_be... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/models/speech_dlm/sequence_generator/multichannel_search.py |
Help me comply with documentation standards | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import Any, Dict, List, Optional, Tuple
import torch
import torch.nn as nn
from fairseq import utils
from fairseq.mod... | --- +++ @@ -26,6 +26,21 @@
class CrossChannelTransformerDecoder(FairseqIncrementalDecoder):
+ """
+ Cross-channel Transformer Decoder Block for parallel spoken dialogue units
+ as described in the paper: https://arxiv.org/pdf/2203.16502.pdf;
+ consisting of *args.decoder_layers* layers. Each layer is a
... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/models/speech_dlm/modules/speech_dlm_decoder.py |
Add docstrings to make code maintainable | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.models import (
... | --- +++ @@ -29,6 +29,10 @@
@register_model("masked_lm")
class MaskedLMModel(FairseqEncoderModel):
+ """
+ Class for training a Masked Language Model. It also supports an
+ additional sentence level prediction if the sent-loss argument is set.
+ """
def __init__(self, args, encoder):
super... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/models/masked_lm.py |
Create docstrings for reusable components | #!/usr/bin/env python3
from ast import literal_eval
from typing import List, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import checkpoint_utils, utils
from fairseq.data.data_utils import lengths_to_padding_mask
from fairseq.models import (
FairseqEncoder,
FairseqEnc... | --- +++ @@ -20,6 +20,20 @@
@register_model("s2t_berard")
class BerardModel(FairseqEncoderDecoderModel):
+ """Implementation of a model similar to https://arxiv.org/abs/1802.04200
+
+ Paper title: End-to-End Automatic Speech Translation of Audiobooks
+ An implementation is available in tensorflow at
+ htt... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/models/speech_to_text/berard.py |
Add docstrings following best practices | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from dataclasses import dataclass, field
from typing import Optional
from fairseq import utils
from fairseq.dataclass import C... | --- +++ @@ -126,12 +126,16 @@
@register_model("speech_dlm", dataclass=SpeechDLMConfig)
class SpeechDLM(FairseqLanguageModel):
+ """Spoken Unit-based Dialogue Language Model model (SpeechDLM) as described
+ in the paper: https://arxiv.org/pdf/2203.16502.pdf
+ """
def __init__(self, decoder):
... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/models/speech_dlm/speech_dlm.py |
Help me document legacy Python code | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import OrderedDict
from fairseq import utils
from fairseq.models import (
FairseqMultiModel,
register_model,
reg... | --- +++ @@ -23,12 +23,27 @@
@register_model("multilingual_transformer")
class MultilingualTransformerModel(FairseqMultiModel):
+ """Train Transformer models for multiple language pairs simultaneously.
+
+ Requires `--task multilingual_translation`.
+
+ We inherit all arguments from TransformerModel and assu... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/models/multilingual_transformer.py |
Add well-formatted docstrings | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from pathlib import Path
import torch
from fairseq import checkpoint_utils
from fairseq.models import register_model, registe... | --- +++ @@ -39,6 +39,8 @@
class S2SConformerEncoder(S2TConformerEncoder):
+ """Based on S2T transformer encoder, with support
+ to incorporate target speaker embedding."""
def __init__(self, args):
super().__init__(args)
@@ -66,6 +68,9 @@
@register_model("s2ut_conformer")
class S2UTConforme... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/models/speech_to_speech/s2s_conformer.py |
Generate documentation strings for clarity | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import math
from pathlib import Path
from typing import Dict, List, Optional, Tuple
import torch
import torch.nn as nn
from to... | --- +++ @@ -38,6 +38,11 @@
@register_model("s2t_transformer")
class S2TTransformerModel(FairseqEncoderDecoderModel):
+ """Adapted Transformer model (https://arxiv.org/abs/1706.03762) for
+ speech-to-text tasks. The Transformer encoder/decoder remains the same.
+ A trainable input subsampler is prepended to ... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/models/speech_to_text/s2t_transformer.py |
Add minimal docstrings for each function | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import math
from typing import Dict, List, Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
... | --- +++ @@ -29,12 +29,17 @@
@register_model("convtransformer")
class ConvTransformerModel(FairseqEncoderDecoderModel):
+ """
+ Transformer-based Speech translation model from ESPNet-ST
+ https://arxiv.org/abs/2004.10234
+ """
def __init__(self, encoder, decoder):
super().__init__(encoder,... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/models/speech_to_text/convtransformer.py |
Add docstrings to improve code quality | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import List, Tuple
import torch
import torch.nn.functional as F
from torch import Tensor, nn
from fairseq.models import FairseqE... | --- +++ @@ -51,6 +51,12 @@ return stride
def forward(self, src_tokens, src_lengths, states=None):
+ """Encode input sequence.
+ :param torch.Tensor xs: input tensor
+ :param torch.Tensor masks: input mask
+ :return: position embedded tensor and mask
+ :rtype Tuple[torch... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/models/speech_to_text/modules/augmented_memory_attention.py |
Add docstrings that explain logic | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from dataclasses import dataclass, field
from typing import Dict, List, Optional, Tuple
import numpy as np
import torch
import... | --- +++ @@ -592,6 +592,7 @@ nn.init.uniform_(self.label_embs_concat[i])
def upgrade_state_dict_named(self, state_dict, name):
+ """Upgrade a (possibly old) state dict for new versions of fairseq."""
super().upgrade_state_dict_named(state_dict, name)
return state_dict
@@ -60... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/models/multires_hubert/multires_hubert.py |
Help me write clear docstrings | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import logging
from typing import Dict, List, Optional, Tuple
import numpy as np
import torch
import torch.nn as nn
from torch im... | --- +++ @@ -297,6 +297,7 @@
@classmethod
def add_args(cls, parser):
+ """Add model-specific arguments to the parser."""
add_wav2vec_asr_args(parser)
parser.add_argument(
"--normalize",
@@ -545,6 +546,7 @@
@classmethod
def add_args(cls, parser):
+ """Add... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/models/speech_to_text/xm_transformer.py |
Improve my code by adding docstrings | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import logging
from collections.a... | --- +++ @@ -20,6 +20,7 @@
def assert_equal(value1, value2, name1=None, name2=None):
+ """Asserts two values are equal otherwise raise an error."""
str_name1 = "" if name1 is None else "{} ".format(name1)
str_name2 = "" if name2 is None else "{} ".format(name2)
@@ -46,6 +47,10 @@
def check_and_ret... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/models/speech_to_text/utils.py |
Create Google-style docstrings for my code | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
from argparse import Namespace
from dataclasses import dataclass, field
from typing import Any
import torch
import torch.nn... | --- +++ @@ -147,10 +147,12 @@
@classmethod
def build_model(cls, cfg: MultiresHubertAsrConfig, task: FairseqTask):
+ """Build a new model instance."""
multireshubert_encoder = MultiresHubertEncoder(cfg, task)
return cls(cfg, multireshubert_encoder)
def get_normalized_probs(self... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/models/multires_hubert/multires_hubert_asr.py |
Add missing documentation to my Python functions | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import logging
from fairseq.models import (
FairseqEncoderModel,
FairseqLanguageModel,
register_model,
register_m... | --- +++ @@ -73,6 +73,7 @@
@classmethod
def add_args(cls, parser):
+ """Add model-specific arguments to the parser."""
XMTransformerModel.add_args(parser)
parser.add_argument(
"--translation-decoder-layers",
@@ -159,6 +160,7 @@
@classmethod
def build_model(cls,... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/models/speech_to_text/xm_transformer_unity.py |
Create docstrings for API functions | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import math
from pathlib import Path
import torch
from fairseq import checkpoint_utils
from fairseq.data.data_utils import le... | --- +++ @@ -30,6 +30,7 @@
class S2TConformerEncoder(FairseqEncoder):
+ """Conformer Encoder for speech translation based on https://arxiv.org/abs/2005.08100"""
def __init__(self, args):
super().__init__(None)
@@ -88,6 +89,19 @@ )
def _forward(self, src_tokens, src_lengths, return_a... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/models/speech_to_text/s2t_conformer.py |
Add docstrings following best practices | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict, List, Optional, Tuple
import torch
import torch.nn as nn
from torch import Tensor
import logging
from fairseq impo... | --- +++ @@ -26,6 +26,21 @@
class TransformerModelBase(FairseqEncoderDecoderModel):
+ """
+ Transformer model from `"Attention Is All You Need" (Vaswani, et al, 2017)
+ <https://arxiv.org/abs/1706.03762>`_.
+
+ Args:
+ encoder (TransformerEncoder): the encoder
+ decoder (TransformerDecoder)... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/models/transformer/transformer_base.py |
Write docstrings for algorithm functions | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import Any, Dict, List, Optional
import torch
import torch.nn as nn
from torch import Tensor
from fairseq import uti... | --- +++ @@ -37,6 +37,17 @@
class TransformerDecoderBase(FairseqIncrementalDecoder):
+ """
+ Transformer decoder consisting of *cfg.decoder.layers* layers. Each layer
+ is a :class:`TransformerDecoderLayer`.
+
+ Args:
+ cfg (argparse.Namespace): parsed command-line arguments
+ dictionary (~... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/models/transformer/transformer_decoder.py |
Auto-generate documentation strings for this file | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn.functional as F
from fairseq.models.nat import (
_apply_del_words,
_apply_ins_masks,
_ap... | --- +++ @@ -30,6 +30,7 @@
class BasicEnsembleModel(torch.nn.Module):
+ """A wrapper around an ensemble of models."""
def __init__(self, models):
super().__init__()
@@ -61,6 +62,7 @@
class EnsembleLevT(BasicEnsembleModel):
+ """A wrapper around an ensemble of models."""
def __init__(... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/models/nat/nonautoregressive_ensembles.py |
Add detailed documentation for each class | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from typing import Any, Dict
from fairseq import checkpoint_utils
from fairseq.data.legacy.masked_lm_dictionary import MaskedLMDict... | --- +++ @@ -21,6 +21,7 @@ class TransformerFromPretrainedXLMModel(TransformerModel):
@staticmethod
def add_args(parser):
+ """Add model-specific arguments to the parser."""
TransformerModel.add_args(parser)
parser.add_argument(
"--pretrained-xlm-checkpoint",
@@ -72,6 +73,... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/models/transformer_from_pretrained_xlm.py |
Add docstrings to make code maintainable | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import Counter
from typing import List
import torch
def align_bpe_to_words(roberta, bpe_tokens: torch.LongTensor, other_to... | --- +++ @@ -10,6 +10,17 @@
def align_bpe_to_words(roberta, bpe_tokens: torch.LongTensor, other_tokens: List[str]):
+ """
+ Helper to align GPT-2 BPE to other tokenization formats (e.g., spaCy).
+
+ Args:
+ roberta (RobertaHubInterface): RoBERTa instance
+ bpe_tokens (torch.LongTensor): GPT-2 ... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/models/roberta/alignment_utils.py |
Add docstrings to improve collaboration | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from dataclasses import dataclass, field
from typing import List, Tuple
import numpy as np
import torch
import torch.nn as nn
imp... | --- +++ @@ -422,6 +422,7 @@
@classmethod
def build_model(cls, cfg: Wav2Vec2Config, task=None):
+ """Build a new model instance."""
return cls(cfg)
@@ -576,6 +577,9 @@ return logits
def _get_feat_extract_output_lengths(self, input_lengths: torch.LongTensor):
+ """
+ ... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/models/wav2vec/wav2vec2.py |
Add docstrings that explain purpose and usage | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
import copy
import logging
import math
import re
from argparse import Namespace
from dataclasses import dataclass, field
fro... | --- +++ @@ -220,6 +220,7 @@
@classmethod
def build_model(cls, cfg: Wav2Vec2CtcConfig, task: FairseqTask):
+ """Build a new model instance."""
w2v_encoder = Wav2VecEncoder(cfg, len(task.target_dictionary))
return cls(cfg, w2v_encoder)
@@ -253,6 +254,7 @@ return logits
... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/models/wav2vec/wav2vec2_asr.py |
Generate documentation strings for clarity | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
import logging
from argparse import Namespace
from dataclasses import dataclass, field
from typing import Any, Optional
imp... | --- +++ @@ -59,6 +59,7 @@
@classmethod
def build_model(cls, cfg: Wav2Vec2ClassificationConfig, task: FairseqTask):
+ """Build a new model instance."""
w2v_encoder = Wav2VecEncoder(cfg, None)
pooling_layer = get_pooling_layer(
cfg,
@@ -69,6 +70,7 @@ return cls(cfg... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/models/wav2vec/wav2vec2_classification.py |
Add standardized docstrings across the file | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, List, Optional
from torch import Tensor
from fairseq.models.transformer import Linear
from fairseq.models.tran... | --- +++ @@ -12,6 +12,7 @@
class AugTransformerUnitDecoder(AugTransformerDecoder):
+ """Based on Transformer decoder, with support to decoding stacked units"""
def __init__(
self,
@@ -49,6 +50,24 @@ src_lengths: Optional[Any] = None,
return_all_hiddens: bool = False,
):
+ ... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/models/speech_to_speech/modules/transformer_decoder_aug.py |
Generate docstrings for exported functions | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from pathlib import Path
from typing import Any, Dict, List, Optional
import torch
from torch import Tensor
from fairseq impo... | --- +++ @@ -28,6 +28,8 @@
class S2STransformerEncoder(S2TTransformerEncoder):
+ """Based on S2T transformer encoder, with support
+ to incorporate target speaker embedding."""
def __init__(self, args):
super().__init__(args)
@@ -54,6 +56,7 @@
class TransformerUnitDecoder(TransformerDecoder... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/models/speech_to_speech/s2s_transformer.py |
Add docstrings to improve collaboration | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from fairseq.utils import new_arange
# -------------- Helper Functions --------------------------------------------------- #
... | --- +++ @@ -229,6 +229,9 @@
def _skip(x, mask):
+ """
+ Getting sliced (dim=0) tensor by mask. Supporting tensor and list/dict of tensors.
+ """
if isinstance(x, int):
return x
@@ -260,6 +263,9 @@
def _fill(x, mask, y, padding_idx):
+ """
+ Filling tensor x with y at masked positi... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/models/nat/levenshtein_utils.py |
Help me document legacy Python code | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from fairseq.modules.transformer_layer import TransformerEncoderLayer
from typing import Optional
import torch
import torch.nn as nn
from fair... | --- +++ @@ -51,6 +51,19 @@
class XMODTransformerEncoderLayerBase(TransformerEncoderLayer):
+ """Encoder layer block.
+
+ In the original paper each operation (multi-head attention or FFN) is
+ postprocessed with: `dropout -> add residual -> layernorm`. In the
+ tensor2tensor code they suggest that learn... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/models/xmod/transformer_layer_xmod.py |
Add docstrings to my Python code | #!/usr/bin/env python3
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import mat... | --- +++ @@ -41,6 +41,9 @@
class RelativePositionEmbedding(nn.Module):
+ """
+ Implementation according to https://arxiv.org/abs/1803.02155
+ """
def __init__(self, head_dim, max_position, norm_init=True):
super().__init__()
@@ -95,6 +98,18 @@
class PositionwiseFF(nn.Module):
+ """
+ ... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/models/speech_to_text/modules/emformer.py |
Help me add docstrings to my project | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import List
import torch
import torch.nn as nn
class Conv1dSubsampler(nn.Module):
def __init__(
self,
in_... | --- +++ @@ -11,6 +11,16 @@
class Conv1dSubsampler(nn.Module):
+ """Convolutional subsampler: a stack of 1D convolution (along temporal
+ dimension) followed by non-linear activation via gated linear units
+ (https://arxiv.org/abs/1911.08460)
+
+ Args:
+ in_channels (int): the number of input chan... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/models/speech_to_text/modules/convolution.py |
Add detailed documentation for each class | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from ..roberta.model_xlmr import XLMRModel
from fairseq.models.xmod.transformer_layer_xmod import XMODTransformerEncoderLayerBase
from ..rober... | --- +++ @@ -62,6 +62,7 @@
@classmethod
def build_model(cls, args, task):
+ """Build a new model instance."""
from omegaconf import OmegaConf
@@ -104,6 +105,7 @@
class XMODEncoder(RobertaEncoder):
+ """XMOD encoder."""
def build_encoder(self, args, dictionary, embed_tokens):
... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/models/xmod/model.py |
Add docstrings to existing functions | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import functools
import operator
import torch
import torch.nn.functional as F
from fairseq.modules.fairseq_dropout import FairseqDropout
from... | --- +++ @@ -56,6 +56,11 @@
class AdaptiveSoftmax(nn.Module):
+ """
+ This is an implementation of the efficient softmax approximation for
+ graphical processing units (GPU), described in the paper "Efficient softmax
+ approximation for GPUs" (http://arxiv.org/abs/1609.04309).
+ """
def __init_... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/modules/adaptive_softmax.py |
Add docstrings to my Python code | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import dynamicconv_cuda
import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.incremental_decoding_utils import ... | --- +++ @@ -133,6 +133,8 @@ )
def _forward_unfolded(self, x, incremental_state, query):
+ """The conventional implementation of convolutions.
+ Unfolding the input by having a window shifting to the right."""
T, B, C = x.size()
K, H = self.kernel_size, self.num_heads
... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/modules/dynamicconv_layer/dynamicconv_layer.py |
Generate docstrings with parameter types | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 Shigeki Karita
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import math
import torch
from torch import nn
from fairseq.modules.rotary_positional_embedding import (
RotaryPositionalEmbedding,
apply_rotary_pos_emb,
)
class ESPNE... | --- +++ @@ -4,6 +4,7 @@ # Copyright 2019 Shigeki Karita
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
+"""Multi-Head Attention layer definition."""
import math
@@ -17,8 +18,15 @@
class ESPNETMultiHeadedAttention(nn.Module):
+ """Multi-Head Attention layer.
+ Args:
+ n_head: The nu... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/modules/espnet_multihead_attention.py |
Document functions with clear intent | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import Dict, List, Optional
import torch
import torch.nn as nn
from torch import Tensor
from fairseq import utils
fr... | --- +++ @@ -35,6 +35,15 @@
class TransformerEncoderBase(FairseqEncoder):
+ """
+ Transformer encoder consisting of *cfg.encoder.layers* layers. Each layer
+ is a :class:`TransformerEncoderLayer`.
+
+ Args:
+ args (argparse.Namespace): parsed command-line arguments
+ dictionary (~fairseq.da... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/models/transformer/transformer_encoder.py |
Write docstrings including parameters and return values | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, List, Optional
import torch
import torch.nn as nn
from torch import Tensor
from fairseq import utils
from fair... | --- +++ @@ -22,6 +22,21 @@
class AugTransformerDecoderBase(TransformerDecoderBase):
+ """
+ Transformer decoder augmented with an additional cross-attention. Each layer
+ is a :class:`AugTransformerDecoderLayerBase`.
+
+ Args:
+ cfg (argparse.Namespace): parsed command-line arguments
+ dic... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/models/transformer/transformer_decoder_aug.py |
Write reusable docstrings | #!/usr/bin/env python3
import copy
from dataclasses import dataclass, field
import logging
import torch
from omegaconf import II
from fairseq.dataclass import FairseqDataclass
try:
from amp_C import multi_tensor_l2norm
multi_tensor_l2norm_available = True
except ImportError:
multi_tensor_l2norm_availa... | --- +++ @@ -1,5 +1,9 @@ #!/usr/bin/env python3
+"""
+Used for EMA tracking a given pytorch module. The user is responsible for calling step()
+and setting the appropriate decay
+"""
import copy
from dataclasses import dataclass, field
@@ -34,6 +38,7 @@
class EMAModule:
+ """Exponential Moving Average of Fa... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/modules/ema_module.py |
Write docstrings for utility functions | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from fairseq.dataclass.utils import gen_parser_from_dataclass
from fairseq.models import (
register_model,
register_model_architecture... | --- +++ @@ -21,6 +21,10 @@
@register_model("transformer")
class TransformerModel(TransformerModelBase):
+ """
+ This is the legacy implementation of the transformer model that
+ uses argparse for configuration.
+ """
@classmethod
def hub_models(cls):
@@ -77,6 +81,7 @@
@classmethod
... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/models/transformer/transformer_legacy.py |
Add concise docstrings to each method | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
from fairseq.modules import Fp32GroupNorm
class KmeansVectorQuantizer(nn.Module):
def __init__(
... | --- +++ @@ -12,6 +12,17 @@ def __init__(
self, dim, num_vars, groups, combine_groups, vq_dim, time_first, gamma=0.25
):
+ """Vector quantization using straight pass-through estimator (i.e. kmeans)
+
+ Args:
+ dim: input dimension (channels)
+ num_vars: number of qua... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/modules/kmeans_vector_quantizer.py |
Add docstrings to my Python code | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import functools
from typing import Any, Dict, List, Tuple, Union
import torch
import torch.utils.checkpoint as checkpoint
from fairseq impor... | --- +++ @@ -12,6 +12,19 @@
def checkpoint_wrapper(m, offload_to_cpu=False):
+ """
+ A friendlier wrapper for performing activation checkpointing.
+
+ Compared to the PyTorch version, this version:
+ - wraps an nn.Module, so that all subsequent calls will use checkpointing
+ - handles keyword argument... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/modules/checkpoint_activations.py |
Replace inline comments with docstrings | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict, Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from torch imp... | --- +++ @@ -13,6 +13,12 @@
class LearnedPositionalEmbedding(nn.Embedding):
+ """
+ This module learns positional embeddings up to a fixed maximum size.
+ Padding ids are ignored by either offsetting based on padding_idx
+ or by setting padding_idx to None and ensuring that the appropriate
+ position ... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/modules/learned_positional_embedding.py |
Document functions with clear intent | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
class LayerDropModuleList(nn.ModuleList):
def __init__(self, p, modules=None):
super().__ini... | --- +++ @@ -2,12 +2,36 @@ #
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
+"""
+LayerDrop as described in https://arxiv.org/abs/1909.11556.
+"""
import torch
import torch.nn as nn
class LayerDropModuleList(nn.ModuleList):
+ """
... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/modules/layer_drop.py |
Write docstrings for algorithm functions | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import random
from collections import Counter
import torch
class EM:
def __init__(
self, W, n_centroi... | --- +++ @@ -12,6 +12,24 @@
class EM:
+ """
+ EM algorithm used to quantize the columns of W to minimize
+
+ ||W - W_hat||^2
+
+ Args:
+ - W: weight matrix of size (in_features x out_features)
+ - n_iter: number of k-means iterations
+ - n_centroids: number of ce... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/modules/quantization/pq/em.py |
Generate documentation strings for clarity | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.incremental_decoding_utils import wi... | --- +++ @@ -48,6 +48,27 @@
class LightweightConv1d(nn.Module):
+ """Lightweight Convolution assuming the input is BxCxT
+ This is just an example that explains LightConv clearer than the TBC version.
+ We don't use this module in the model.
+
+ Args:
+ input_size: # of channels of the input and o... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/modules/lightweight_convolution.py |
Help me document legacy Python code | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict, Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.i... | --- +++ @@ -74,6 +74,29 @@
@with_incremental_state
class DynamicConv1dTBC(nn.Module):
+ """Dynamic lightweight convolution taking T x B x C inputs
+ Args:
+ input_size: # of channels of the input
+ kernel_size: convolution channels
+ padding_l: padding to the left when using "same" padding... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/modules/dynamic_convolution.py |
Add return value explanations in docstrings | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
import torch.nn as nn
def logsumexp(x, dim=1):
return torch.logsumexp(x.float(), dim=dim).type_as(x)
... | --- +++ @@ -3,6 +3,18 @@ # This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
+"""
+This file is to re-implemented the low-rank and beam approximation of CRF layer
+Proposed by:
+
+Sun, Zhiqing, et al.
+Fast Structured Decoding for Sequence Model... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/modules/dynamic_crf_layer.py |
Document helper functions with docstrings | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional
import torch
from fairseq.modules import (
ESPNETMultiHeadedAttention,
LayerNorm,
MultiheadAttentio... | --- +++ @@ -19,6 +19,7 @@
class ConvolutionModule(torch.nn.Module):
+ """Convolution block used in the conformer block"""
def __init__(
self,
@@ -30,6 +31,16 @@ bias=False,
export=False,
):
+ """
+ Args:
+ embed_dim: Embedding dimension
+ ... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/modules/conformer_layer.py |
Generate helpful docstrings for debugging | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Callable, Optional
import torch
import torch.nn as nn
from fairseq import utils
from fairseq.modules import LayerNorm, Mul... | --- +++ @@ -14,6 +14,10 @@
class TransformerSentenceEncoderLayer(nn.Module):
+ """
+ Implements a Transformer Encoder Layer used in BERT/XLM style pre-trained
+ models.
+ """
def __init__(
self,
@@ -108,6 +112,10 @@ self_attn_mask: Optional[torch.Tensor] = None,
self_att... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/modules/transformer_sentence_encoder_layer.py |
Create Google-style docstrings for my code | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import re
from operator import attrgetter, itemgetter
import torch
import numpy as np
import torch.distributed as dist
import t... | --- +++ @@ -29,6 +29,36 @@ verbose=True,
state_dict=None,
):
+ """
+ Quantize a model in-place by stages. All the targeted
+ layers are replaced by their quantized counterpart,
+ and the model is ready for the finetuning of the
+ centroids in a standard training loop (no modifications
+ requ... | https://raw.githubusercontent.com/facebookresearch/fairseq/HEAD/fairseq/modules/quantization/pq/utils.py |