| repo (string, 1-99 chars) | file (string, 13-215 chars) | code (string, 12-59.2M chars) | file_length (int64, 12-59.2M) | avg_line_length (float64, 3.82-1.48M) | max_line_length (int64, 12-2.51M) | extension_type (string, 1 distinct value) |
|---|---|---|---|---|---|---|
NMTGMinor | NMTGMinor-master/onmt/modules/extensions/multihead_attn_blaslt/setup.py | import torch
from torch.utils import cpp_extension
from setuptools import setup, find_packages
import subprocess
import sys
import warnings
import os
from torch.utils.cpp_extension import CUDAExtension
from torch.utils.cpp_extension import BuildExtension
from torch.utils.cpp_extension import CUDA_HOME
# ninja build ... | 5,264 | 39.19084 | 101 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/extensions/mlp/mlp_gelu_dropoutadd.py | from copy import copy, deepcopy
import math
import torch
from torch import nn
import torch.nn.functional as F
import unittest
from time import time
import numpy as np
import random
import silu_cuda
try:
import apex.amp as amp
from apex.amp import half_function
except (ModuleNotFoundError, ImportError) as e:
... | 11,654 | 32.491379 | 122 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/batch_ensemble/be_relative_attention.py | import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.nn import Parameter
import math
class BatchEnsembleMM(object):
@staticmethod
def forward(x, weight, bias, ensemble_r, ensemble_s):
"""
:param x: [T x B x H]
:param weight: [H_out x H]
:param bias: [H... | 34,591 | 45 | 119 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/batch_ensemble/be_encdec_attention.py | import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.nn import Parameter
import math
# from onmt.constants import double_precision
# from .batch_ensemble_linear import BatchEnsembleMM as mm
class BatchEnsembleMM(object):
@staticmethod
def forward(x, weight, bias, ensemble_r, ensembl... | 26,603 | 46.677419 | 117 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/batch_ensemble/batch_ensemble_linear.py | import torch
import torch.nn.functional as F
from onmt.modules.dropout import variational_dropout
class BatchEnsembleMM(object):
@staticmethod
def forward(x, weight, bias, ensemble_r, ensemble_s):
"""
:param x: [T x B x H]
:param weight: [H_out x H]
:param bias: [H_out]
... | 12,098 | 38.15534 | 109 | py |
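The three BatchEnsembleMM records above (be_relative_attention.py, be_encdec_attention.py, batch_ensemble_linear.py) all truncate before the body of `forward`. As orientation, here is a minimal sketch of the rank-1 BatchEnsemble linear operation that the visible docstring shapes describe, following the standard formulation of Wen et al. (2020) rather than this repository's exact code:

```python
import torch

def batch_ensemble_mm(x, weight, bias, ensemble_r, ensemble_s):
    # x:          [T, B, H]    input sequence (time, batch, hidden)
    # weight:     [H_out, H]   weight shared across ensemble members
    # bias:       [H_out]
    # ensemble_r: [B, H]       rank-1 input factor per batch element
    # ensemble_s: [B, H_out]   rank-1 output factor per batch element
    # y = ((x * r) @ W^T) * s + b, with r and s broadcast over time
    x_r = x * ensemble_r.unsqueeze(0)          # [T, B, H]
    y = torch.matmul(x_r, weight.t())          # [T, B, H_out]
    return y * ensemble_s.unsqueeze(0) + bias  # [T, B, H_out]
```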
NMTGMinor | NMTGMinor-master/onmt/modules/bayes_by_backprop/utils.py | import torch
def flatten_list(tensors):
flat = list()
indices = list()
shapes = list()
s = 0
for tensor in tensors:
shapes.append(tensor.shape)
flat_t = torch.flatten(tensor)
size = flat_t.shape[0]
flat.append(flat_t)
indices.append((s, s+size))
s... | 599 | 18.354839 | 46 | py |
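The `flatten_list` record is cut off inside its loop. A runnable reconstruction of the pair this module exports (`flatten_list` plus the `unflatten` imported from it elsewhere in the repo) is below; the final concatenation and the `unflatten` body are assumptions inferred from the visible bookkeeping, not verbatim repository code:

```python
import torch

def flatten_list(tensors):
    # Flatten a list of tensors into one 1-D vector while remembering
    # each tensor's shape and its (start, end) slice in that vector.
    flat, indices, shapes = [], [], []
    s = 0
    for tensor in tensors:
        shapes.append(tensor.shape)
        flat_t = torch.flatten(tensor)
        size = flat_t.shape[0]
        flat.append(flat_t)
        indices.append((s, s + size))
        s += size
    return torch.cat(flat), indices, shapes

def unflatten(flat, indices, shapes):
    # Inverse operation: slice the flat vector back into shaped tensors.
    return [flat[start:end].view(shape)
            for (start, end), shape in zip(indices, shapes)]
```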
NMTGMinor | NMTGMinor-master/onmt/modules/bayes_by_backprop/feed_forward.py | import math
import torch
from torch import nn
from torch.nn import Parameter
import torch.nn.functional as F
from onmt.modules.dropout import variational_dropout
from .gaussian import Gaussian, ScaleMixtureGaussian
from .utils import flatten_list, unflatten
class PositionWiseFeedForward(nn.Module):
"""Multi-head... | 4,046 | 37.913462 | 112 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/bayes_by_backprop/gaussian.py | import torch
import torch.nn.functional as F
import numpy
import math
import torch.nn as nn
log_sqrt_2pi = math.log(math.sqrt(2 * math.pi))
class Gaussian(object):
def __init__(self, mu, rho):
super().__init__()
self.mu = mu
self.rho = rho
self.normal = torch.distributions.Normal(... | 2,619 | 33.473684 | 112 | py |
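The `Gaussian` record stops right after constructing `torch.distributions.Normal(...)`. The `log_sqrt_2pi` constant it defines is exactly what the Bayes-by-Backprop weight posterior of Blundell et al. (2015) needs; the sketch below shows that standard formulation as an illustration, not this file's verbatim body:

```python
import math
import torch

log_sqrt_2pi = math.log(math.sqrt(2 * math.pi))

class Gaussian(object):
    def __init__(self, mu, rho):
        self.mu = mu
        self.rho = rho
        self.normal = torch.distributions.Normal(0, 1)

    @property
    def sigma(self):
        # softplus keeps the standard deviation strictly positive
        return torch.log1p(torch.exp(self.rho))

    def sample(self):
        # reparameterization trick: mu + sigma * eps with eps ~ N(0, 1)
        epsilon = self.normal.sample(self.rho.shape).to(self.mu.device)
        return self.mu + self.sigma * epsilon

    def log_prob(self, x):
        # log N(x | mu, sigma^2), summed over all parameters
        return (-log_sqrt_2pi - torch.log(self.sigma)
                - ((x - self.mu) ** 2) / (2 * self.sigma ** 2)).sum()
```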
NMTGMinor | NMTGMinor-master/onmt/modules/bayes_by_backprop/generator.py | class Generator(nn.Module):
def __init__(self, hidden_size, output_size, fix_norm=False):
super(Generator, self).__init__()
self.hidden_size = hidden_size
self.output_size = output_size
self.linear = nn.Linear(hidden_size, output_size)
self.fix_norm = fix_norm
stdv... | 1,214 | 31.837838 | 77 | py |
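The `Generator` record truncates inside weight initialization (`stdv...`). One plausible reading of its `fix_norm` flag, following the FixNorm output layer of Nguyen and Salazar (2019), is to project against length-normalized embedding rows; the forward body below is a hypothetical illustration of that reading, not the repository's code:

```python
import torch.nn.functional as F

def generator_forward(self, hidden):
    # hidden: [*, hidden_size] -> logits: [*, output_size]
    if self.fix_norm:
        # FixNorm-style projection (assumed): unit-normalize each
        # vocabulary embedding row before the dot product.
        weight = F.normalize(self.linear.weight, dim=-1)
        return F.linear(hidden, weight, self.linear.bias)
    return self.linear(hidden)
```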
NMTGMinor | NMTGMinor-master/onmt/modules/bayes_by_backprop/relative_self_attention.py | import math
import torch
from torch import nn
from torch.nn import Parameter
import torch.nn.functional as F
from .gaussian import Gaussian, ScaleMixtureGaussian
from .utils import flatten_list, unflatten
from ..optimized.relative_self_attention_func import relative_self_attn_func
# from .fast_self_multihead_attn_fun... | 5,261 | 42.131148 | 112 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/bayes_by_backprop/encdec_attention.py | import math
import torch
from torch import nn
from torch.nn import Parameter
import torch.nn.functional as F
from ..optimized.encdec_attention_func import encdec_attn_func
from .gaussian import Gaussian, ScaleMixtureGaussian
from .utils import flatten_list, unflatten
class EncdecMultiheadAttn(nn.Module):
"""Multi... | 5,175 | 42.133333 | 117 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/kernels/kernel.py |
"""Construct wide convolution kernels."""
from typing import Optional, Mapping, Tuple, Union
from collections import defaultdict
import math
import torch
import torch.nn as nn
| 180 | 15.454545 | 50 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/mlp/test_mlp_gelu.py | from copy import copy, deepcopy
import math
import torch
from torch import nn
import torch.nn.functional as F
import unittest
from time import time
import numpy as np
import random
try:
from torch.cuda.amp import custom_fwd, custom_bwd
except (ModuleNotFoundError, ImportError) as e:
from ..optimized.compat imp... | 22,661 | 38.005164 | 122 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/mlp/mlp.py | from copy import copy
import math
import torch
from torch import nn
import unittest
from time import time
import numpy as np
try:
from torch.cuda.amp import custom_fwd, custom_bwd
except (ModuleNotFoundError, ImportError) as e:
from ..optimized.compat import custom_fwd, custom_bwd
try:
import fused_mlp_re... | 15,995 | 33.252677 | 119 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/mlp/test_mlp_relu.py | from copy import copy, deepcopy
import math
import torch
from torch import nn
import torch.nn.functional as F
import unittest
from time import time
import numpy as np
import random
try:
from torch.cuda.amp import custom_fwd, custom_bwd
except (ModuleNotFoundError, ImportError) as e:
from ..optimized.compat imp... | 13,167 | 36.409091 | 122 | py |
NMTGMinor | NMTGMinor-master/onmt/models/transformer_xl.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from onmt.modules.relative_attention import RelPartialLearnableMultiHeadAttn
from onmt.models.transformer_layers import PositionalEncoding, PrePostProcessing
from onmt.models.transformer_layers import EncoderLayer, DecoderLayer
from onmt.models.transfor... | 9,715 | 36.513514 | 120 | py |
NMTGMinor | NMTGMinor-master/onmt/models/transformers.py | import copy
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import defaultdict
from torch.utils.checkpoint import checkpoint
import onmt
from onmt.models.transformer_layers import EncoderLayer, DecoderLayer, PositionalEncoding, \
PrePostProcessing
... | 40,743 | 39.18146 | 118 | py |
NMTGMinor | NMTGMinor-master/onmt/models/performer_layer.py | import torch
def softmax_kernel(data, *, projection_matrix, is_query, normalize_data=True, eps=1e-4, device=None):
b, h, *_ = data.shape
data_normalizer = (data.shape[-1] ** -0.25) if normalize_data else 1.
ratio = (projection_matrix.shape[0] ** -0.5)
projection = repeat(projection_matrix, 'j d -> ... | 9,531 | 35.945736 | 136 | py |
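The `softmax_kernel` record shows the two FAVOR+ scaling constants (`data_normalizer = d**-0.25`, `ratio = m**-0.5`) and cuts off at the projection step. For context, the positive random-feature map these constants belong to (Choromanski et al., 2021) is conventionally completed as below; this is a sketch of the standard formulation, with the original einops `repeat` call replaced by a plain `einsum`:

```python
import torch

def softmax_kernel(data, projection_matrix, is_query, normalize_data=True, eps=1e-4):
    # data: [b, h, n, d] queries or keys; projection_matrix: [m, d]
    d, m = data.shape[-1], projection_matrix.shape[0]
    data_normalizer = d ** -0.25 if normalize_data else 1.0
    ratio = m ** -0.5
    # w^T x term: project the normalized inputs onto m random features
    data_dash = torch.einsum('...nd,md->...nm',
                             data_normalizer * data,
                             projection_matrix.type_as(data))
    # ||x||^2 / 2 term of exp(w^T x - ||x||^2 / 2)
    diag_data = (data ** 2).sum(dim=-1, keepdim=True) * (data_normalizer ** 2) / 2.0
    if is_query:
        stabilizer = data_dash.amax(dim=-1, keepdim=True)        # per-query
    else:
        stabilizer = data_dash.amax(dim=(-1, -2), keepdim=True)  # global over keys
    return ratio * (torch.exp(data_dash - diag_data - stabilizer) + eps)
```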
NMTGMinor | NMTGMinor-master/onmt/models/transformer_layers.py | import math
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.utils.weight_norm as WeightNorm
import onmt
import torch.nn.functional as F
from onmt.modules.bottle import Bottle
from onmt.modules.static_dropout import StaticDropout
from onmt.modules.linear import XavierLinear as Linear
from... | 17,782 | 39.142212 | 118 | py |
NMTGMinor | NMTGMinor-master/onmt/models/pretrain_transformer.py | import copy
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import defaultdict
from torch.utils.checkpoint import checkpoint
import onmt
from onmt.modules.base_seq2seq import NMTModel, Reconstructor, DecoderState
from onmt.models.transformers import Tr... | 18,440 | 45.923664 | 118 | py |
NMTGMinor | NMTGMinor-master/onmt/models/relative_transformer.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from onmt.models.transformer_layers import PositionalEncoding, PrePostProcessing
from onmt.models.transformer_layers import EncoderLayer, DecoderLayer
from onmt.models.transformers import TransformerEncoder, TransformerDecoder, Transformer, Transformer... | 49,047 | 40.11316 | 119 | py |
NMTGMinor | NMTGMinor-master/onmt/models/relative_transformer_layers.py | import torch
import torch.nn as nn
import onmt
from onmt.models.transformer_layers import PrePostProcessing, MultiHeadAttention, Linear
from onmt.modules.relative_attention import RelPartialLearnableMultiHeadAttn
from onmt.modules.optimized.relative_self_attention import RelativeSelfMultiheadAttn
from onmt.utils impor... | 10,207 | 44.775785 | 120 | py |
NMTGMinor | NMTGMinor-master/onmt/models/speech_recognizer/lstm.py | import random
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from collections import defaultdict
import math
import onmt
from onmt.modules.base_seq2seq import NMTModel, DecoderState
from onmt.models.transformer_layers import P... | 30,801 | 39.002597 | 118 | py |
NMTGMinor | NMTGMinor-master/onmt/models/speech_recognizer/perceiver.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from onmt.models.transformer_layers import PositionalEncoding
from onmt.modules.pre_post_processing import PrePostProcessing
from onmt.models.transformers import TransformerEncoder, TransformerDecoder, Transformer, TransformerDecodingState
from onmt.mo... | 3,242 | 42.24 | 120 | py |
NMTGMinor | NMTGMinor-master/onmt/models/speech_recognizer/classifier.py | import copy
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import defaultdict
class TransformerClassifier(nn.Module):
"""Main model in 'Attention is all you need' """
def __init__(self, encoder, generator=None, mpc=False, **kwargs):
... | 2,848 | 30.655556 | 107 | py |
NMTGMinor | NMTGMinor-master/onmt/models/speech_recognizer/lid_loss.py | import math
import numpy
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.loss import _Loss
import onmt
import onmt.modules
from onmt.utils import flip
class CrossEntropyLIDLoss(_Loss):
"""
Class for managing efficient loss computation.
loss computations
Users ... | 3,563 | 32.308411 | 108 | py |
NMTGMinor | NMTGMinor-master/onmt/models/speech_recognizer/conformer.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from onmt.models.transformers import TransformerEncoder, TransformerDecoder, Transformer, TransformerDecodingState
from onmt.modules.sinusoidal_positional_encoding import SinusoidalPositionalEmbedding
import onmt
from onmt.modules.base_seq2seq import N... | 12,149 | 37.571429 | 126 | py |
NMTGMinor | NMTGMinor-master/onmt/models/speech_recognizer/wav2vec2.py | import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from onmt.models.transformers import Transformer, TransformerDecodingState
from typing import List, Optional, Union
from collections import defaultdict
import onmt
from onmt.modules.optimized.linear import Linear
import math
from .fairseq_wav2... | 47,708 | 43.839286 | 123 | py |
NMTGMinor | NMTGMinor-master/onmt/models/speech_recognizer/wavlm.py | import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from onmt.models.transformers import Transformer, TransformerDecodingState
from typing import List, Optional, Union
from collections import defaultdict
import onmt
from onmt.modules.optimized.linear import Linear
import math
from .fairseq_wav2... | 8,507 | 40.300971 | 119 | py |
NMTGMinor | NMTGMinor-master/onmt/models/speech_recognizer/conformer_layers.py | import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
import math
from onmt.modules.optimized.relative_self_attention import RelativeSelfMultiheadAttn
from onmt.modules.optimized.feed_forward import PositionWiseFeedForward
from onmt.modules.dropout import variational_dropout
f... | 6,311 | 43.450704 | 109 | py |
NMTGMinor | NMTGMinor-master/onmt/models/speech_recognizer/relative_transformer.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from onmt.models.transformer_layers import PositionalEncoding
from onmt.modules.pre_post_processing import PrePostProcessing
from onmt.models.transformers import TransformerEncoder, TransformerDecoder, Transformer, TransformerDecodingState
from onmt.mo... | 28,183 | 42.293395 | 120 | py |
NMTGMinor | NMTGMinor-master/onmt/models/speech_recognizer/relative_transformer_layers.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import onmt
from onmt.modules.pre_post_processing import PrePostProcessing
from onmt.modules.linear import FeedForward
from onmt.modules.attention import MultiHeadAttention
from onmt.modules.dropout import VariationalDropout
from onmt.modules.optimize... | 24,391 | 52.026087 | 127 | py |
NMTGMinor | NMTGMinor-master/onmt/models/speech_recognizer/mssm/mhs4.py | #!/usr/bin/env python3
from typing import Optional, List, Tuple, Union
import time
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.optim as optim
import torch.nn.functional as F
from torch import Tensor
# import pykeops
# import pykeops.torch
# from pykeop... | 37,369 | 32.515695 | 114 | py |
NMTGMinor | NMTGMinor-master/onmt/models/speech_recognizer/mssm/fft_convolution.py | import torch
| 14 | 4 | 12 | py |
NMTGMinor | NMTGMinor-master/onmt/models/speech_recognizer/mssm/ssm_kernel/ssm_kernel_coefficient.py | #!/usr/bin/env python3
import torch
from opt_einsum import contract
import os
import pathlib
import ssm_kernel_coefficient_cuda
# from torch.utils.cpp_extension import load
# ssm_kernel_coefficient_binding = load(
# name="ssm_kernel_coefficient_binding",
# sources=[
# os.path.join(
# path... | 6,529 | 35.077348 | 102 | py |
NMTGMinor | NMTGMinor-master/onmt/models/speech_recognizer/mssm/ssm_kernel/setup.py | import torch
from torch.utils import cpp_extension
from setuptools import setup, find_packages
import subprocess
from pathlib import Path
import sys
import warnings
import os
from torch.utils.cpp_extension import CUDAExtension
from torch.utils.cpp_extension import BuildExtension
from torch.utils.cpp_extension import ... | 5,691 | 34.798742 | 101 | py |
NMTGMinor | NMTGMinor-master/onmt/models/speech_recognizer/fairseq_wav2vec2/wavlm_modules.py | # --------------------------------------------------------
# WavLM: Large-Scale Self-Supervised Pre-training for Full Stack Speech Processing (https://arxiv.org/abs/2110.13900.pdf)
# Github source: https://github.com/microsoft/unilm/tree/master/wavlm
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [se... | 18,345 | 39.320879 | 186 | py |
NMTGMinor | NMTGMinor-master/onmt/models/speech_recognizer/fairseq_wav2vec2/enum.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from enum import Enum, EnumMeta
from typing import List
class StrEnumMeta(EnumMeta):
# this is workaround for submitit pickling leading ... | 1,753 | 31.481481 | 107 | py |
NMTGMinor | NMTGMinor-master/onmt/models/speech_recognizer/fairseq_wav2vec2/adapter.py | import torch
import torch.nn.functional as F
import torch.nn as nn
from onmt.modules.layer_norm import LayerNorm
class Adapter(torch.nn.Module):
def __init__(self, input_dim, downsample_factor=2):
self.input_dim = input_dim
self.middle_dim = input_dim // downsample_factor
super(Adapter, s... | 3,345 | 34.595745 | 112 | py |
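The `Adapter` record stops just after computing the bottleneck width (`middle_dim = input_dim // downsample_factor`). The shape this implies is the standard residual adapter of Houlsby et al. (2019): down-project, nonlinearity, up-project, residual add. The sketch below illustrates that pattern under that assumption; details such as the LayerNorm placement are guesses:

```python
import torch.nn as nn
import torch.nn.functional as F

class Adapter(nn.Module):
    def __init__(self, input_dim, downsample_factor=2):
        super().__init__()
        self.input_dim = input_dim
        self.middle_dim = input_dim // downsample_factor
        self.norm = nn.LayerNorm(input_dim)
        self.down = nn.Linear(input_dim, self.middle_dim)
        self.up = nn.Linear(self.middle_dim, input_dim)

    def forward(self, x):
        # bottleneck transform with a residual connection around it
        return x + self.up(F.relu(self.down(self.norm(x))))
```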
NMTGMinor | NMTGMinor-master/onmt/models/speech_recognizer/fairseq_wav2vec2/utils.py | try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
from omegaconf import DictConfig, OmegaConf, open_dict, _utils
import contextlib
import itertools
import logging
import re
import warnings
from typing import Optional, Tuple, Callable, Dict, List, TYPE_CHECKING
from o... | 11,673 | 36.536977 | 120 | py |
NMTGMinor | NMTGMinor-master/onmt/models/speech_recognizer/fairseq_wav2vec2/wav2vec2.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from dataclasses import dataclass, field
from typing import List, Tuple
import copy
import numpy as np
import torch
import torch.... | 63,460 | 36.440118 | 121 | py |
NMTGMinor | NMTGMinor-master/onmt/models/speech_recognizer/fairseq_wav2vec2/wavlm.py | import math
import logging
from typing import List, Optional, Tuple
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from onmt.modules.layer_norm import LayerNorm
from .fairseq_modules import (
Fp32GroupNorm,
Fp32LayerNorm,
GradMultiply,
SamePad,
TransposeLas... | 35,759 | 37.123667 | 234 | py |
NMTGMinor | NMTGMinor-master/onmt/models/speech_recognizer/fairseq_wav2vec2/fairseq_modules.py | import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor, nn
from torch.nn import Parameter
import math
from typing import Dict, Optional, Tuple
import torch
from torch.cuda.amp import custom_fwd, custom_bwd
from onmt.modules.optimized.self_attention_func import self_attn_func, self_attn_compact_... | 41,973 | 39.870497 | 128 | py |
NMTGMinor | NMTGMinor-master/onmt/models/bayes_by_backprop/relative_transformer.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from onmt.models.transformer_layers import PositionalEncoding, PrePostProcessing
from onmt.models.transformer_layers import EncoderLayer, DecoderLayer
from onmt.models.transformers import TransformerEncoder, TransformerDecoder, Transformer, Transformer... | 18,937 | 38.372141 | 114 | py |
NMTGMinor | NMTGMinor-master/onmt/models/bayes_by_backprop/relative_transformer_layers.py | import torch
import torch.nn as nn
import onmt
from onmt.models.transformer_layers import PrePostProcessing, MultiHeadAttention, Linear
from onmt.utils import flip
from onmt.modules.linear import XavierLinear as Linear
from onmt.modules.linear import XavierLinear
from onmt.modules.attention import MultiHeadAttention
f... | 8,703 | 40.447619 | 103 | py |
NMTGMinor | NMTGMinor-master/onmt/models/multilingual_translator/reversible_transformers.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from onmt.models.transformer_layers import PrePostProcessing
from onmt.modules.attention import MultiHeadAttention
from onmt.modules.optimized.relative_self_attention import RelativeSelfMultiheadAttn
from onmt.modules.optimized.encdec_attention import E... | 21,390 | 34.89094 | 113 | py |
NMTGMinor | NMTGMinor-master/onmt/models/multilingual_translator/relative_transformer.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from onmt.models.transformer_layers import PositionalEncoding, PrePostProcessing
from onmt.models.transformer_layers import EncoderLayer, DecoderLayer
from onmt.models.transformers import TransformerEncoder, TransformerDecoder, Transformer, Transformer... | 21,668 | 43.58642 | 130 | py |
NMTGMinor | NMTGMinor-master/onmt/models/multilingual_translator/relative_transformer_layers.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import onmt
from onmt.models.transformer_layers import PrePostProcessing
from onmt.modules.optimized.encdec_attention import EncdecMultiheadAttn
from onmt.modules.optimized.relative_self_attention import RelativeSelfMultiheadAttn
from onmt.modules.opti... | 21,330 | 50.524155 | 115 | py |
NMTGMinor | NMTGMinor-master/onmt/models/discourse/discourse_transformer.py | # Transformer with discourse information
from collections import defaultdict
import onmt
import torch
import torch.nn as nn
import torch.nn.functional as F
from onmt.models.transformers import Transformer, TransformerDecodingState
from onmt.modules.pre_post_processing import PrePostProcessing
from .gate_layer import R... | 10,079 | 42.261803 | 115 | py |
NMTGMinor | NMTGMinor-master/onmt/models/discourse/gate_layer.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import onmt
from onmt.modules.pre_post_processing import PrePostProcessing
from onmt.modules.linear import FeedForward
from onmt.modules.linear import XavierLinear as Linear
from onmt.modules.attention import MultiHeadAttention
from onmt.modules.dropou... | 11,711 | 51.756757 | 117 | py |
NMTGMinor | NMTGMinor-master/onmt/models/deltalm/transformer_decoder.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import Any, Dict, List, Optional
from collections import defaultdict
import torch
import torch.nn as nn
from .module... | 15,616 | 35.832547 | 119 | py |
NMTGMinor | NMTGMinor-master/onmt/models/deltalm/transformer_encoder.py | import math
from typing import Dict, List, Optional
import torch
import torch.nn as nn
# from fairseq.modules import (
# FairseqDropout,
# LayerDropModuleList,
# LayerNorm,
# PositionalEmbedding,
# SinusoidalPositionalEmbedding,
# )
from .modules.positional_embeddings import PositionalEmbedding, Si... | 7,894 | 33.627193 | 111 | py |
NMTGMinor | NMTGMinor-master/onmt/models/deltalm/deltalm.py | import os
from typing import Any, Dict, List, Optional, Tuple
import torch
import torch.nn as nn
from torch import Tensor
from .transformer_encoder import TransformerEncoderBase
from .transformer_decoder import TransformerDecoderBase
from .modules.transformer_layer import TransformerDecoderLayerBase
from .modules.ut... | 17,669 | 34.841785 | 103 | py |
NMTGMinor | NMTGMinor-master/onmt/models/deltalm/modules/multihead_attention.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import Dict, Optional, Tuple
import torch
import torch.nn.functional as F
# from fairseq import utils
# from fairseq.... | 19,975 | 40.272727 | 90 | py |
NMTGMinor | NMTGMinor-master/onmt/models/deltalm/modules/efficient_adapters.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from .utils import get_activation_fn
from onmt.modules.layer_norm import layer_norm_func
from onmt.modules.optimized.linear import Linear as LinearModule
def Linear(in_features, out_features, bias=True):
m = LinearModule(in_features, out_features... | 4,610 | 38.75 | 96 | py |
NMTGMinor | NMTGMinor-master/onmt/models/deltalm/modules/positional_embeddings.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import Dict, Optional, Any
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tenso... | 6,620 | 36.619318 | 94 | py |
NMTGMinor | NMTGMinor-master/onmt/models/deltalm/modules/utils.py | try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
import contextlib
import itertools
import logging
import re
import warnings
from typing import Optional, Tuple, Callable, Dict, List, TYPE_CHECKING
import numpy as np
import torch
def gelu_accurate(x):
if not ha... | 1,520 | 24.779661 | 91 | py |
NMTGMinor | NMTGMinor-master/onmt/models/deltalm/modules/transformer_layer.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict, List, Optional
import torch
import torch.nn as nn
# from fairseq import utils
# from onmt.models.speech_recognizer.f... | 14,593 | 35.303483 | 95 | py |
NMTGMinor | NMTGMinor-master/onmt/models/deltalm/modules/activation_functions.py | import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor, nn
from torch.nn import Parameter
import math
from typing import Dict, Optional, Tuple
import torch
def gelu_accurate(x):
if not hasattr(gelu_accurate, "_a"):
gelu_accurate._a = math.sqrt(2 / math.pi)
return (
0... | 507 | 27.222222 | 91 | py |
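Both `gelu_accurate` records (here and in modules/utils.py above) truncate inside the return statement. The visible prefix, with its cached `_a = math.sqrt(2 / math.pi)`, matches fairseq's tanh approximation of GELU, whose completion is deterministic:

```python
import math
import torch

def gelu_accurate(x):
    # tanh approximation of GELU, as in fairseq:
    # 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))
    if not hasattr(gelu_accurate, "_a"):
        gelu_accurate._a = math.sqrt(2 / math.pi)
    return 0.5 * x * (
        1 + torch.tanh(gelu_accurate._a * (x + 0.044715 * torch.pow(x, 3)))
    )
```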
NMTGMinor | NMTGMinor-master/onmt/models/deltalm/modules/layer_drop.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
LayerDrop as described in https://arxiv.org/abs/1909.11556.
"""
import torch
import torch.nn as nn
class LayerDropModuleList(nn.ModuleLi... | 1,408 | 31.022727 | 71 | py |
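The layer_drop record breaks off at the class declaration. Since the header cites the LayerDrop paper and the file mirrors fairseq, the reference implementation is short enough to reproduce as a sketch of what the class most likely contains:

```python
import torch
import torch.nn as nn

class LayerDropModuleList(nn.ModuleList):
    """Iterate over layers, skipping each with probability p during
    training (LayerDrop, https://arxiv.org/abs/1909.11556)."""

    def __init__(self, p, modules=None):
        super().__init__(modules)
        self.p = p

    def __iter__(self):
        dropout_probs = torch.empty(len(self)).uniform_()
        for i, m in enumerate(super().__iter__()):
            # at eval time every layer runs; in training each layer
            # survives independently with probability 1 - p
            if not self.training or dropout_probs[i] > self.p:
                yield m
```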
NMTGMinor | NMTGMinor-master/onmt/reversible_models/reversible.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from onmt.models.transformer_layers import PrePostProcessing
from onmt.modules.linear import FeedForward
from onmt.modules.attention import MultiHeadAttention
from torch.autograd.function import Function
import sys
from torch.utils.checkpoint import get... | 1,099 | 34.483871 | 106 | py |
NMTGMinor | NMTGMinor-master/onmt/reversible_models/relative_transformers.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from onmt.models.transformer_layers import PrePostProcessing
# from onmt.modules.linear import FeedForward as position_wise_feed_forward
from onmt.modules.attention import MultiHeadAttention
# from onmt.modules.relative_attention import RelPartialLearna... | 22,745 | 35.865478 | 115 | py |
NMTGMinor | NMTGMinor-master/onmt/reversible_models/transformers.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from onmt.models.transformer_layers import PrePostProcessing
from onmt.modules.linear import FeedForward as position_wise_feed_forward
from onmt.modules.attention import MultiHeadAttention
from torch.autograd.function import Function
import sys
from tor... | 20,232 | 35.001779 | 117 | py |
NMTGMinor | NMTGMinor-master/onmt/reversible_models/transformers_testing2.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from onmt.models.transformer_layers import PrePostProcessing
from onmt.modules.linear import FeedForward as position_wise_feed_forward
from onmt.modules.attention import MultiHeadAttention
from torch.autograd.function import Function
import sys
from tor... | 20,730 | 34.559177 | 117 | py |
NMTGMinor | NMTGMinor-master/onmt/speech/Augmenter.py | import math
import torch
from collections import defaultdict
import onmt
import random
class Augmenter(object):
"""
Implementation of the "Spec Augmentation" method
(Only vertical and horizontal masking)
"""
def __init__(self, F=8, mf=2, T=64, max_t=0.2, mt=2,
input_size=40, conc... | 1,821 | 26.19403 | 97 | py |
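The `Augmenter` record names the SpecAugment hyperparameters (F, mf, T, mt, max_t) but cuts off before any masking logic. A functional sketch of what those parameters conventionally control (Park et al., 2019) follows; the exact sampling details are assumptions, not this class's verbatim body:

```python
import random

def spec_augment_(feat, F=8, mf=2, T=64, max_t=0.2, mt=2):
    # feat: [time, freq] log-mel feature tensor, masked in place
    time_len, freq_len = feat.size(0), feat.size(1)
    for _ in range(mf):                        # mf vertical (frequency) masks
        f = random.randint(0, F)               # width <= F bins
        f0 = random.randint(0, max(0, freq_len - f))
        feat[:, f0:f0 + f] = 0
    t_cap = min(T, int(max_t * time_len))      # cap width by max_t fraction
    for _ in range(mt):                        # mt horizontal (time) masks
        t = random.randint(0, max(0, t_cap))
        t0 = random.randint(0, max(0, time_len - t))
        feat[t0:t0 + t, :] = 0
    return feat
```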
NMTGMinor | NMTGMinor-master/onmt/speech/ctc_loss.py | from distutils.version import LooseVersion
import numpy as np
import six
import torch
import torch.nn.functional as F
import onmt
class CTC(torch.nn.Module):
def __init__(self, vocab_size, hidden_size, dropout_rate,
ctc_type="builtin", reduce=True,
padding_idx=-1, blank_idx=0):
... | 3,645 | 30.162393 | 120 | py |
NMTGMinor | NMTGMinor-master/onmt/data/mmap_indexed_dataset.py | import os
import struct
import numpy as np
import torch
import torch.utils.data
from functools import lru_cache
def read_longs(f, n):
a = np.empty(n, dtype=np.int64)
f.readinto(a)
return a
def write_longs(f, a):
f.write(np.array(a, dtype=np.int64))
dtypes = {
1: np.uint8,
2: np.int8,
... | 6,566 | 27.184549 | 105 | py |
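The `dtypes` table in the mmap_indexed_dataset record is truncated after the int8 entry. The surrounding `read_longs`/`write_longs` helpers are fairseq's, so the remaining integer codes presumably follow the fairseq index-header convention reproduced below; entries beyond code 2 are an assumption, not the visible file contents:

```python
import numpy as np

# integer code stored in the binary index header -> element dtype
dtypes = {
    1: np.uint8,
    2: np.int8,
    3: np.int16,    # codes 3-8 assumed from the fairseq convention
    4: np.int32,
    5: np.int64,
    6: np.float64,
    7: np.double,
    8: np.uint16,
}

# usage: decode a raw buffer once the header's dtype code is known
# array = np.frombuffer(buffer, dtype=dtypes[code])
```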
NMTGMinor | NMTGMinor-master/onmt/data/wav_dataset.py | import torch
import torchaudio as taudio
from functools import lru_cache
from onmt.utils import safe_readaudio
import numpy as np
import soundfile
import math
import torchaudio
import os
# this function reads wav file based on the timestamp in seconds
def safe_readaudio_from_cache(file_, wav_path, start=0.0, end=0.0,... | 3,544 | 30.371681 | 102 | py |
NMTGMinor | NMTGMinor-master/onmt/data/multistream_dataset.py | from __future__ import division
import math
import torch
import torch.utils.data
from collections import defaultdict
import onmt
from onmt.speech.Augmenter import Augmenter
from onmt.modules.dropout import switchout
"""
Data management for stream-to-stream models
Two basic classes:
- Batch stores the input / output ... | 21,315 | 35.62543 | 120 | py |
NMTGMinor | NMTGMinor-master/onmt/data/whisper_audio.py | import os
from functools import lru_cache
from typing import Optional, Union
import ffmpeg
import numpy as np
import torch
import torch.nn.functional as F
from .utils import exact_div
# hard-coded audio hyperparameters
SAMPLE_RATE = 16000
N_FFT = 400
N_MELS = 80
HOP_LENGTH = 160
CHUNK_LENGTH = 30
N_SAMPLES = CHUNK_L... | 4,767 | 31.435374 | 99 | py |
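The whisper_audio record truncates mid-constant (`N_SAMPLES = CHUNK_L...`), but the arithmetic the listed hyperparameters imply is worth spelling out, since every downstream tensor shape depends on it. The values match OpenAI Whisper's audio constants; `exact_div` is assumed to be the usual assert-and-divide helper:

```python
SAMPLE_RATE = 16000      # Hz
N_FFT = 400              # 25 ms analysis window at 16 kHz
N_MELS = 80              # mel filterbank size
HOP_LENGTH = 160         # 10 ms hop -> 100 frames per second
CHUNK_LENGTH = 30        # seconds of audio per model input

def exact_div(x, y):
    # integer division that insists on zero remainder (assumed helper)
    assert x % y == 0
    return x // y

N_SAMPLES = CHUNK_LENGTH * SAMPLE_RATE       # 30 * 16000 = 480,000 samples
N_FRAMES = exact_div(N_SAMPLES, HOP_LENGTH)  # 480,000 / 160 = 3,000 frames
```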
NMTGMinor | NMTGMinor-master/onmt/data/stream_dataset.py | from __future__ import division
import math
import torch
import torch.utils.data
from collections import defaultdict
import onmt
from onmt.speech.Augmenter import Augmenter
from onmt.modules.dropout import switchout
"""
Data management for stream-to-stream models
Two basic classes:
- Batch stores the input / output ... | 17,676 | 35.598344 | 120 | py |
NMTGMinor | NMTGMinor-master/onmt/data/dataset.py | from __future__ import division
import math
import torch
import torch.utils.data
from collections import defaultdict
import onmt
from onmt.speech.Augmenter import Augmenter
from onmt.modules.dropout import switchout
import numpy as np
from .batch_utils import allocate_batch, allocate_batch_unbalanced
import dill
"""
... | 32,665 | 38.499395 | 129 | py |
NMTGMinor | NMTGMinor-master/onmt/data/data_iterator.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
import itertools
import logging
import math
import operator
import os
import queue
import time
from threading import Thread
import random
import numpy as np
import torch
from onmt.data.dataset import rewrap
from onmt.data import data_utils
_sentine... | 14,073 | 31.354023 | 112 | py |
NMTGMinor | NMTGMinor-master/onmt/data/binarizer.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import Counter
import os
from onmt.utils import safe_readline, safe_readaudio
# from multiprocessing import Pool
import torch... | 20,533 | 40.906122 | 126 | py |
NMTGMinor | NMTGMinor-master/onmt/data/multi_dataset.py | from __future__ import division
import math
import torch
import torch.utils.data
from collections import defaultdict
from .dataset import Dataset
from .mmap_indexed_dataset import MMapIndexedDataset
from .scp_dataset import SCPIndexDataset
| 242 | 21.090909 | 52 | py |
NMTGMinor | NMTGMinor-master/onmt/data/multidata_iterator.py | import itertools
import logging
import math
import operator
import os
import queue
import time
from threading import Thread
from .data_iterator import EpochBatchIterating, DataIterator
import numpy as np
import torch
class MultiEpochIterator(object):
# this class stores N epoch iterators for N datasets
# ini... | 9,036 | 34.163424 | 118 | py |
NMTGMinor | NMTGMinor-master/onmt/data/scp_dataset.py | import torch
from kaldiio import load_mat
from functools import lru_cache
import numpy as np
from .audio_utils import _parse_arkpath, ArkLoader
import warnings
warnings.filterwarnings("ignore", message="The given NumPy array is not writeable ")
class SCPIndexDataset(torch.utils.data.Dataset):
"""
This dataset... | 1,877 | 29.786885 | 92 | py |
NMTGMinor | NMTGMinor-master/onmt/data/indexed_dataset.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import os
import struct
import nu... | 4,543 | 28.128205 | 84 | py |
NMTGMinor | NMTGMinor-master/onmt/data/audio_utils.py | import numpy as np
from contextlib import contextmanager
import io
from io import TextIOBase
import os
import subprocess
import sys
import warnings
from functools import partial
from io import BytesIO
from io import StringIO
import re
import struct
import sys
import warnings
import soundfile
import math
import torch
f... | 12,439 | 27.863109 | 95 | py |
NMTGMinor | NMTGMinor-master/onmt/data/lm_dataset.py | from __future__ import division
import math
import torch
import torch.utils.data
from collections import defaultdict
import onmt
from onmt.data.dataset import Dataset
class LanguageModelBatch(object):
def __init__(self, data, target, lang, **kwargs):
self.data = data
self.target = target
... | 4,860 | 28.822086 | 108 | py |
NMTGMinor | NMTGMinor-master/onmt/train_utils/classify_trainer.py | from __future__ import division
import datetime
import gc
import inspect_model
import math
import os
import re
import time
import torch
import copy
import sys
import contextlib
import onmt
import onmt.markdown
import onmt.modules
from onmt.data.data_iterator import DataIterator
from onmt.data.multidata_iterator impor... | 31,146 | 38.576874 | 120 | py |
NMTGMinor | NMTGMinor-master/onmt/train_utils/bayes_by_backprop_trainer.py | from __future__ import division
import datetime
import gc
import inspect_model
import math
import os
import re
import time
import torch
from apex import amp
import onmt
import onmt.markdown
import onmt.modules
from onmt.data.data_iterator import DataIterator
from onmt.data.dataset import rewrap
from onmt.model_factor... | 23,966 | 38.35468 | 117 | py |
NMTGMinor | NMTGMinor-master/onmt/train_utils/accent_gan_trainer.py | from __future__ import division
import datetime
import gc
import inspect_model
import math
import os
import re
import time
import torch
from apex import amp
import onmt
import onmt.markdown
import onmt.modules
from onmt.data.data_iterator import DataIterator
from onmt.data.dataset import rewrap
from onmt.model_factor... | 39,445 | 36.675263 | 121 | py |
NMTGMinor | NMTGMinor-master/onmt/train_utils/evaluator.py | from __future__ import division
import sys, tempfile
import onmt
import onmt.modules
#~ from onmt.metrics.gleu import sentence_gleu
#~ from onmt.metrics.sbleu import sentence_bleu
from onmt.metrics.bleu import moses_multi_bleu
#~ from onmt.utils import compute_score
import torch
import torch.nn as nn
from torch import... | 7,248 | 34.18932 | 95 | py |
NMTGMinor | NMTGMinor-master/onmt/train_utils/gem_trainer.py | from __future__ import division
import datetime
import gc
import math
import os
import re
import time
import torch
import copy
import sys
import contextlib
import numpy as np
import onmt
import onmt.markdown
import onmt.modules
from onmt.data.data_iterator import DataIterator
from onmt.data.multidata_iterator import ... | 30,109 | 40.077763 | 128 | py |
NMTGMinor | NMTGMinor-master/onmt/train_utils/mp_trainer.py | from __future__ import division
import datetime
import gc
import math
import os
import re
import time
import torch
import copy
import sys
import contextlib
import onmt
import onmt.markdown
import onmt.modules
from onmt.data.data_iterator import DataIterator
from onmt.data.multidata_iterator import MultiDataIterator
f... | 68,654 | 42.452532 | 121 | py |
NMTGMinor | NMTGMinor-master/onmt/inference/perplexity_scorer.py | import onmt
import onmt.modules
import torch.nn as nn
import torch
import math
from torch.autograd import Variable
from onmt.model_factory import build_model
import torch.nn.functional as F
from onmt.inference.search import BeamSearch, DiverseBeamSearch
from onmt.inference.translator import Translator
model_list = ['t... | 3,625 | 30.258621 | 80 | py |
NMTGMinor | NMTGMinor-master/onmt/inference/stream_translator.py | import onmt
import onmt.modules
import torch.nn as nn
import torch
import math
from onmt.model_factory import build_model
import torch.nn.functional as F
from onmt.inference.search import BeamSearch, DiverseBeamSearch
from onmt.inference.translator import Translator
from collections import defaultdict
class StreamTra... | 22,096 | 41.250478 | 120 | py |
NMTGMinor | NMTGMinor-master/onmt/inference/Beam.py | from __future__ import division
import torch
import onmt
"""
Class for managing the internals of the beam search process.
hyp1-hyp1---hyp1 -hyp1
\ /
hyp2 \-hyp2 /-hyp2hyp2
/ \
hyp3-hyp3---hyp3 -hyp3
================... | 4,071 | 28.085714 | 80 | py |
NMTGMinor | NMTGMinor-master/onmt/inference/predictor.py | import onmt
import onmt.modules
import torch
from onmt.model_factory import build_classifier
from ae.Autoencoder import Autoencoder
import torch.nn.functional as F
import sys
from onmt.constants import add_tokenidx
from options import backward_compatible
model_list = ['transformer', 'stochastic_transformer', 'fusion_n... | 10,872 | 35.609428 | 120 | py |
NMTGMinor | NMTGMinor-master/onmt/inference/global_translator.py | import onmt
import onmt.modules
import torch.nn as nn
import torch
import math
from onmt.model_factory import build_model
import torch.nn.functional as F
from onmt.inference.search import BeamSearch, DiverseBeamSearch
from onmt.inference.translator import Translator
from collections import defaultdict
class GlobalStr... | 25,704 | 41.557947 | 120 | py |
NMTGMinor | NMTGMinor-master/onmt/inference/fast_translator.py | import sys
import onmt
import onmt.modules
import torch
import math
from onmt.model_factory import build_model, optimize_model
from onmt.inference.search import BeamSearch, Sampling
from onmt.inference.translator import Translator
from onmt.constants import add_tokenidx
from options import backward_compatible
# buggy ... | 48,877 | 42.641071 | 126 | py |
NMTGMinor | NMTGMinor-master/onmt/inference/search.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import torch
import onmt
class S... | 11,084 | 37.224138 | 112 | py |
NMTGMinor | NMTGMinor-master/onmt/inference/ColdFusionTranslator.py | import onmt
import onmt.modules
import torch.nn as nn
import torch
import math
from onmt.model_factory import build_model, build_fusion, build_language_model
from ae.Autoencoder import Autoencoder
import torch.nn.functional as F
import sys
model_list = ['transformer', 'stochastic_transformer', 'fusion_network']
clas... | 15,611 | 35.138889 | 124 | py |
NMTGMinor | NMTGMinor-master/onmt/inference/nam_translate.py | import onmt
import onmt.modules
import torch.nn as nn
import torch
import math
from torch.autograd import Variable
from onmt.model_factory import build_model
import torch.nn.functional as F
from onmt.inference.search import BeamSearch, DiverseBeamSearch
from onmt.inference.translator import Translator
model_list = ['t... | 23,188 | 40.483005 | 119 | py |
NMTGMinor | NMTGMinor-master/onmt/inference/translator.py | import onmt
import onmt.modules
import torch
from onmt.model_factory import build_model, build_language_model, optimize_model
from ae.Autoencoder import Autoencoder
import torch.nn.functional as F
import sys
from onmt.constants import add_tokenidx
from options import backward_compatible
model_list = ['transformer', 's... | 19,446 | 35.485929 | 120 | py |
NMTGMinor | NMTGMinor-master/onmt/legacy/stochastic_transformer_layers.py | import torch
from onmt.models.transformer_layers import EncoderLayer, DecoderLayer
class StochasticEncoderLayer(EncoderLayer):
"""Wraps multi-head attentions and position-wise feed forward into one encoder layer
Args:
h: number of heads
d_model: dimension of model
p: drop... | 4,693 | 30.716216 | 117 | py |
NMTGMinor | NMTGMinor-master/onmt/legacy/stochastic_transformers.py | import numpy as np
import torch, math
import torch.nn as nn
import onmt
from onmt.models.transformer_layers import PositionalEncoding
from onmt.models.transformer_layers import EncoderLayer, DecoderLayer
from onmt.legacy.stochastic_transformer_layers import StochasticEncoderLayer, StochasticDecoderLayer
from onmt.model... | 3,473 | 32.085714 | 143 | py |
NMTGMinor | NMTGMinor-master/onmt/legacy/DynamicTransformer/Dlcl.py | #!/usr/bin/env python
# encoding: utf-8
"""
@author: Wang Qiang
@contact: wangqiangneu@gmail.com
@desc: connection schema between layers
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
class DynamicLinearCombination(nn.Module):
"""Implementation of Dynamic Linear Combina... | 6,453 | 35.88 | 118 | py |
NMTGMinor | NMTGMinor-master/onmt/legacy/DynamicTransformer/Models.py | import math
import torch
import onmt
from onmt.legacy.DynamicTransformer.Dlcl import DynamicLinearCombination
from onmt.models.transformers import TransformerEncoder, TransformerDecoder
from onmt.modules.dropout import embedded_dropout
from torch.utils.checkpoint import checkpoint
class DlclTransformerEncoder(Transf... | 9,788 | 36.505747 | 114 | py |