| repo (string, 1-99 chars) | file (string, 13-215 chars) | code (string, 12-59.2M chars) | file_length (int64, 12-59.2M) | avg_line_length (float64, 3.82-1.48M) | max_line_length (int64, 12-2.51M) | extension_type (1 class) |
|---|---|---|---|---|---|---|
NMTGMinor | NMTGMinor-master/test/modules/self_multihead_attn_func.py | """
Self-attention with multi-head attention.
Code is taken from apex self-attention implementation
https://github.com/NVIDIA/apex/tree/master/apex/contrib/csrc/multihead_attn
"""
import torch
import torch.nn.functional as F
from torch.cuda.amp import custom_fwd, custom_bwd
try:
import mask_softmax_dropout_cuda
... | 24,368 | 44.464552 | 127 | py |
NMTGMinor | NMTGMinor-master/pretrain_module/modeling_m2m100.py | # coding=utf-8
# Copyright 2021 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/... | 41,949 | 40.208251 | 120 | py |
NMTGMinor | NMTGMinor-master/pretrain_module/configuration_utils.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a cop... | 21,508 | 56.819892 | 153 | py |
NMTGMinor | NMTGMinor-master/pretrain_module/positional_embeddings.py | from typing import Dict, Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
import math
def make_positions(tensor, padding_idx: int):
"""Replace non-padding symbols with their position numbers.
Position numbers begin at padding_idx+1. Padding symbols are ignor... | 5,604 | 37.655172 | 94 | py |
NMTGMinor | NMTGMinor-master/pretrain_module/modeling_outputs.py | from dataclasses import dataclass
from typing import List, Optional, Tuple
import torch
from .file_utils import ModelOutput
@dataclass
class BaseModelOutput(ModelOutput):
"""
Base class for model's outputs, with potential hidden states and attentions.
Args:
last_hidden_state (:obj:`torch.FloatTe... | 1,557 | 49.258065 | 168 | py |
NMTGMinor | NMTGMinor-master/pretrain_module/modeling_utils.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors, Facebook AI Research authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the L... | 31,118 | 44.830633 | 128 | py |
NMTGMinor | NMTGMinor-master/pretrain_module/tokenization.py | import onmt.markdown
import argparse
parser = argparse.ArgumentParser(description='preprocess.py')
onmt.markdown.add_md_help_argument(parser)
parser.add_argument('-data_file', default="",
help="Path to the data")
parser.add_argument('-plm_vocab_file', default="", type=str,
help... | 3,440 | 34.112245 | 99 | py |
NMTGMinor | NMTGMinor-master/pretrain_module/modeling_deltalm.py | # coding=utf-8
# Copyright 2021, The Facebook AI Research Team and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.or... | 45,662 | 39.09043 | 129 | py |
NMTGMinor | NMTGMinor-master/pretrain_module/modeling_bert.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a cop... | 41,160 | 44.231868 | 146 | py |
NMTGMinor | NMTGMinor-master/pretrain_module/adapter.py | import torch
import torch.nn.functional as F
import torch.nn as nn
from onmt.modules.layer_norm import LayerNorm
class Adapter(torch.nn.Module):
def __init__(self, input_dim, downsample_factor=2):
self.input_dim = input_dim
self.middle_dim = input_dim // downsample_factor
super(Adapter... | 3,348 | 33.525773 | 112 | py |
NMTGMinor | NMTGMinor-master/pretrain_module/modeling_mbart.py | # coding=utf-8
# Copyright 2021, The Facebook AI Research Team and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.or... | 102,521 | 43.965789 | 155 | py |
NMTGMinor | NMTGMinor-master/pretrain_module/tokenization_deltalm.py | import torch
import os
from contextlib import contextmanager
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from transformers.tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file... | 18,485 | 42.805687 | 221 | py |
NMTGMinor | NMTGMinor-master/pretrain_module/tokenization_mbart50eu.py | import torch
import os
from contextlib import contextmanager
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from transformers.tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
# SPIECE_UNDERLINE = "▁"
#
# VOCAB_FILES_NAMES = {"vocab... | 15,946 | 47.471125 | 223 | py |
NMTGMinor | NMTGMinor-master/pretrain_module/modeling_bart.py | # coding=utf-8
# Copyright 2021 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/... | 90,013 | 45.327329 | 161 | py |
NMTGMinor | NMTGMinor-master/pretrain_module/file_utils.py | """
Utilities for working with the local dataset cache.
This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp
Copyright by the AllenNLP authors.
"""
import fnmatch
import json
import logging
import os
import re
import shutil
import sys
import tarfile
import tempfile
from collections imp... | 22,028 | 37.512238 | 150 | py |
NMTGMinor | NMTGMinor-master/pretrain_module/activations.py | import logging
import math
import torch
import torch.nn.functional as F
logger = logging.getLogger(__name__)
def swish(x):
return x * torch.sigmoid(x)
def _gelu_python(x):
""" Original Implementation of the gelu activation function in Google Bert repo when initially created.
For information: Open... | 1,536 | 26.446429 | 115 | py |
NMTGMinor | NMTGMinor-master/pretrain_module/modeling_whisper.py | import copy
import math
import random
from typing import Optional, Tuple, Any, Dict, List, Union
import torch
import torch.utils.checkpoint
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor, nn
from torch.nn import Parameter
import numpy as np
from torch.nn import CrossEntropyLoss, MSELos... | 35,008 | 40.876794 | 121 | py |
NMTGMinor | NMTGMinor-master/pretrain_module/modeling_roberta.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a cop... | 8,232 | 40.791878 | 119 | py |
NMTGMinor | NMTGMinor-master/pretrain_module/huggingface_tokenizers_tbc/file_utils.py | # Copyright 2020 The HuggingFace Team, the AllenNLP library authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
... | 2,795 | 33.95 | 117 | py |
NMTGMinor | NMTGMinor-master/ae/VariationalLayer.py | import torch
import torch.nn as nn
class VariationalLayer(nn.Module):
def __init__(self, inputSize, outputSize):
super(VariationalLayer, self).__init__()
print("Variational layer")
self.inputSize = inputSize
self.outputSize = outputSize
self.meanLL= nn.Linear(self.input... | 901 | 23.378378 | 63 | py |
NMTGMinor | NMTGMinor-master/ae/Trainer.py | from __future__ import division
import sys, tempfile
import onmt
import onmt.markdown
import onmt.modules
import argparse
import torch
import torch.nn as nn
from torch import cuda
from torch.autograd import Variable
import math
import time, datetime
import random
import numpy as np
from onmt.multiprocessing.multiproce... | 9,048 | 33.14717 | 136 | py |
NMTGMinor | NMTGMinor-master/ae/Evaluator.py | import onmt
import onmt.modules
import torch.nn as nn
import torch
import math
from torch.autograd import Variable
from onmt.model_factory import build_model
import torch.nn.functional as F
from ae.Autoencoder import Autoencoder
import sys
model_list = ['transformer', 'stochastic_transformer']
class Evaluator(object... | 9,240 | 30.010067 | 111 | py |
NMTGMinor | NMTGMinor-master/ae/Autoencoder.py | import torch
import torch.nn as nn
import onmt
import torch.nn.functional as F
from ae.VariationalLayer import VariationalLayer
class Autoencoder(nn.Module):
def __init__(self, nmt_model,opt):
super(Autoencoder, self).__init__()
self.param_init = opt.param_init
self.nmt = nmt_m... | 11,572 | 40.332143 | 113 | py |
NMTGMinor | NMTGMinor-master/onmt/Rescorer.py | import onmt
import onmt.modules
import torch.nn as nn
import torch
import math
from onmt.model_factory import build_model, build_language_model
from ae.Autoencoder import Autoencoder
import torch.nn.functional as F
import sys
model_list = ['transformer', 'stochastic_transformer', 'fusion_network']
class Rescorer(obj... | 10,889 | 32.100304 | 111 | py |
NMTGMinor | NMTGMinor-master/onmt/constants.py | import torch
PAD = 0
UNK = 1
BOS = 2
EOS = 3
PAD_WORD = '<blank>'
UNK_WORD = '<unk>'
BOS_WORD = '<s>'
EOS_WORD = '</s>'
checkpointing = 0
static = False
residual_type = 'regular'
max_position_length = 8192
torch_version = float(torch.__version__[:3])
double_precision = False
recompute = False
neg_log_sigma1 = 0
neg... | 3,775 | 28.968254 | 110 | py |
NMTGMinor | NMTGMinor-master/onmt/optim.py | import math
import torch
import torch.optim as optim
from torch.optim.optimizer import Optimizer
class AdamWrapper(optim.Adam):
def step(self, closure=None, fake=False):
if fake:
return
else:
super(AdamWrapper, self).step(closure=closure)
class AdamWWrapper(opti... | 21,058 | 37.854244 | 116 | py |
NMTGMinor | NMTGMinor-master/onmt/model_factory.py | import torch
import torch.nn as nn
import onmt
from onmt.models.transformers import TransformerEncoder, TransformerDecoder, Transformer, MixedEncoder
from onmt.models.transformer_layers import PositionalEncoding
from onmt.models.relative_transformer import RelativeTransformer
from onmt.modules.copy_generator import Cop... | 41,066 | 44.277839 | 120 | py |
NMTGMinor | NMTGMinor-master/onmt/Dict.py | import torch
import math
import random, string
from multiprocessing import Pool
from collections import Counter
import os
from onmt.utils import safe_readline
class Dict(object):
def __init__(self, data=None, lower=False):
self.idxToLabel = {}
self.labelToIdx = {}
self.frequencies = {}
... | 8,644 | 28.810345 | 113 | py |
NMTGMinor | NMTGMinor-master/onmt/utils.py | import logging, traceback
import os, re
import torch
import torchaudio
import math
import soundfile as sf
import torch
import torch.nn.functional as F
# this function is borrowed from Facebook
# avoid jumping into the middle of a character
def safe_readline(f):
pos = f.tell()
while True:
try:
... | 10,432 | 36.128114 | 128 | py |
NMTGMinor | NMTGMinor-master/onmt/online_translator.py | import onmt
import onmt.modules
from collections import defaultdict
try:
from mosestokenizer import MosesDetokenizer, MosesTokenizer
except ImportError:
# print("[WARNING] Moses tokenizer is not installed. Models with 'detokenize' option won't have Moses-detokenized outputs")
MosesDetokenizer = None
Mos... | 15,658 | 33.339912 | 127 | py |
NMTGMinor | NMTGMinor-master/onmt/bayesian_factory.py | import torch
import torch.nn as nn
import onmt
from onmt.models.bayes_by_backprop.relative_transformer import \
RelativeTransformerEncoder, RelativeTransformerDecoder, BayesianTransformer
from onmt.models.transformer_layers import PositionalEncoding
from onmt.modules.copy_generator import CopyGenerator
from options... | 4,008 | 37.92233 | 115 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/identity.py | import torch
from torch import Tensor
import torch.nn as nn
class Identity(torch.nn.Module):
r"""A placeholder identity operator that is argument-insensitive.
Args:
args: any argument (unused)
kwargs: any keyword argument (unused)
Examples::
>>> m = nn.Identity(54, unused_argume... | 665 | 23.666667 | 77 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/checkpoint.py | import torch
import warnings
from torch.utils.checkpoint import get_device_states, set_device_states, check_backward_validity
def detach_variable(inputs):
if isinstance(inputs, tuple):
out = []
for inp in inputs:
x = inp.detach()
x.requires_grad = inp.requires_grad
... | 7,659 | 43.277457 | 115 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/pre_post_processing.py | import torch
import torch.nn as nn
from .layer_norm import LayerNorm, MultilingualLayerNorm
import onmt
from onmt.modules.dropout import VariationalDropout
from onmt.modules.bottle import Bottle
# from onmt.modules.optimized.dropout_add import fused_dropout_add
class PrePostProcessing(nn.Module):
"""Applies proce... | 3,085 | 36.180723 | 119 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/rotary_postional_encodings.py | import torch
from torch import nn, einsum
# from einops import rearrange, repeat
class SinusoidalEmbeddings(torch.nn.Module):
def __init__(self, dim, base=10000):
super().__init__()
inv_freq = 1. / (base ** (torch.arange(0, dim, 2).float() / dim))
self.register_buffer('inv_freq', inv_freq)... | 2,003 | 35.436364 | 86 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/lru.py | import torch
import torch.nn as nn
import numpy as np
class LRU(nn.Module):
def __init__(self, H, N, reverse=False, r_min=0, r_max=1, max_phase=2 * np.pi):
super().__init__()
"""Initialize parameters of the LRU layer."""
# N: state dimension, H: model dimension
# Initialization o... | 2,938 | 37.168831 | 118 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/copy_generator.py | import torch.nn as nn
import torch.nn.functional as F
import torch
import torch.cuda
from onmt.modules.linear import XavierLinear
import math
import onmt
class CopyGenerator(nn.Module):
"""Generator module that additionally considers copying
words directly from the source.
The main idea is that we have an... | 4,676 | 33.389706 | 109 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/bottle.py | import math
import torch
import torch.nn as nn
from torch.autograd import Variable
"""
Class Bottle:
When working with masked tensors, bottles extract the "true" tensors
using masks to avoid unnecessary computation
"""
class Bottle(nn.Module):
def __init__(self, function):
super(Bottle, se... | 2,044 | 30.953125 | 120 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/base_seq2seq.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import onmt, math
from onmt.modules.optimized.linear import Linear, linear_function
class Generator(nn.Module):
def __init__(self, hidden_size, output_size, fix_norm=False):
super(Generator, self).__init__()
self.hidden_s... | 5,861 | 32.497143 | 121 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/loss.py | import math
import numpy
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.loss import _Loss
import onmt
import onmt.modules
from onmt.utils import flip
def tiny_value_of_dtype(dtype: torch.dtype):
"""
Returns a moderately tiny value for a given PyTorch data type that i... | 23,600 | 38.400668 | 114 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/convolution.py | import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
import math
class Conv2dSubsampling(nn.Module):
def __init__(self, input_dim, output_dim, dropout=0.0):
"""
:param input_dim: the log mel feature (normally 40)
:param output_dim: network size (... | 7,447 | 35.509804 | 116 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/test_layer_norm.py | import unittest
import sys
import os
import numpy as np
import torch
#
# import fast_layer_norm as fln
# from apex.contrib.layer_norm.layer_norm import FastLayerNorm
import fast_layer_norm_cuda as fln
from layer_norm import LayerNorm
class GPUTimer:
def __init__(self, stream):
self.start_ = torch.cuda.Ev... | 7,976 | 27.797834 | 93 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/linear.py | import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.utils.weight_norm as WeightNorm
import onmt
import torch.nn.functional as F
# from onmt.modules.swish import Swish
from onmt.modules.dropout import VariationalDropout
# different linears for the same input
def group_linear(linears, input,... | 5,439 | 26.897436 | 77 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/utilities.py | import torch
import torch.nn as nn
class AttributeEmbeddings(nn.Module):
def __init__(self, atb_dicts, atb_size):
self.n_attributes = len(atb_dicts)
self.atb_sizes = atb_size
super().__init__()
self.atb_embeddings = nn.ModuleDict()
for i in atb_dicts:
self.a... | 821 | 20.631579 | 85 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/dropout.py | import numpy as np
import torch
from torch.autograd import Variable
import torch.nn.functional as F
import onmt
class VariationalDropout(torch.nn.Module):
def __init__(self, p=0.5, batch_first=False, inplace=False):
super().__init__()
self.p = p
self.batch_first = batch_first
self.... | 5,102 | 29.375 | 108 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/sinusoidal_positional_encoding.py | import torch.nn as nn
import torch
import math
# Positional Embedding with discrete inputs
class SinusoidalPositionalEmbedding(nn.Module):
def __init__(self, demb):
super(SinusoidalPositionalEmbedding, self).__init__()
self.demb = demb
inv_freq = 1 / (10000 ** (torch.arange(0.0, demb, 2... | 4,400 | 33.116279 | 102 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/weight_control_lstm.py | # This is the
import torch
import torch.nn as nn
from torch.nn import Parameter
from functools import wraps
import math
class WeightDrop(torch.nn.Module):
def __init__(self, module, weights, dropout=0,):
"""
:param module: a LSTM module
:param weights:
:param dropout:
:par... | 7,439 | 34.769231 | 112 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/performer.py | import math
import torch
import torch.nn.functional as F
from torch import nn
from torch.cuda.amp import autocast
from einops import rearrange, repeat
from functools import partial
from contextlib import contextmanager
# helpers
def exists(val):
return val is not None
def empty(tensor):
return tensor.nume... | 10,788 | 33.691318 | 121 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/relative_attention.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from onmt.constants import double_precision
def _rel_shift(x, zero_triu=False):
# zero_pad size: [q_len, 1, bsz, n_head]
zero_pad = torch.zeros((x.size(0), 1, *x.size()[2:]),
device=x.device, dtype=x.dtype)
x_p... | 18,254 | 35.148515 | 116 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/rezero.py | # Implementation of the ReZERO training strategy
import torch
import torch.nn as nn
class ReZero(nn.Module):
def __init__(self, fn):
super().__init__()
self.g = nn.Parameter(torch.tensor(1e-3))
self.fn = fn
def forward(self, x):
return x * self.g
| 291 | 18.466667 | 49 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/swish.py | import torch
import torch.nn as nn
try:
import apex.amp as amp
from apex.amp import half_function
except (ModuleNotFoundError, ImportError) as e:
amp = None
from .optimized.compat import half_function
try:
from torch.cuda.amp import custom_fwd, custom_bwd
except (ModuleNotFoundError, ImportError) ... | 1,489 | 23.42623 | 78 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/layer_norm.py | import math
import torch
import numbers
from torch.nn.parameter import Parameter
from torch.nn import init
from torch.nn import functional as F
import importlib
try:
from torch.cuda.amp import custom_fwd, custom_bwd
except (ModuleNotFoundError, ImportError) as e:
from .optimized.compat import custom_fwd, custo... | 7,129 | 31.557078 | 123 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/attention.py | import math
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.init as init
import torch.nn.utils.weight_norm as WeightNorm
import onmt
import torch.nn.functional as F
from onmt.modules.bottle import Bottle
from onmt.modules.static_dropout import StaticDropout
from onmt.modules.linea... | 5,883 | 40.43662 | 117 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/lsh_attention.py | # coding=utf-8
# Copyright 2020 The Trax Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at... | 18,617 | 41.027088 | 120 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/static_dropout.py | import torch
from torch.autograd.function import InplaceFunction, Function
from torch.autograd import Variable
from itertools import repeat
import torch.nn as nn
class StaticDropoutFunction(Function):
@staticmethod
def forward(ctx, input, module, train=False):
ctx.train = train
... | 2,235 | 28.421053 | 78 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/nce/nce_loss.py | """NCE Implementation from https://github.com/Stonesjtu/Pytorch-NCE"""
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.loss import _Loss
import onmt
class NCELoss(_Loss):
def __init__(self, hidden_size, output_size, noise_ratio=256, logz=1, label_smoothing=0.... | 4,012 | 41.691489 | 112 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/nce/nce_utils.py | import torch
import onmt
# from math import is_close
def build_unigram_noise(freq, alpha=1.0):
"""
:param alpha: scaling factor. 0.0 = uniform distribution
:param freq: torch tensor with frequencies of each word
:return: torch tensor - probability distribution (multinomial distribution)
"""
p... | 580 | 23.208333 | 79 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/nce/nce_linear.py | """An index linear class for generic NCE module"""
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from math import isclose
BACKOFF_PROB=1e-10
class AliasMultinomial(torch.nn.Module):
''' Alias sampling method to speedup multinomial sampling
The alias method treats multinomial... | 9,215 | 38.553648 | 120 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/multilingual_partitioned/linear.py | import torch
import torch.nn.functional as F
import torch.nn as nn
import math
class MPLinear(torch.nn.Module):
"""
A linear layer with partitioned weights
"""
# TODO: write gradcheck testing
def __init__(self, input_size, output_size, factor_size):
super().__init__()
self.facto... | 3,881 | 30.306452 | 100 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/multilingual_partitioned/relative_attention.py | import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.nn import Parameter
import math
from ..optimized.relative_self_attention_func import relative_self_attn_func
class MPRelativeSelfMultiheadAttn(nn.Module):
"""Multi-headed attention.
See "Attention Is All You Need" for more details... | 4,563 | 41.654206 | 106 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/multilingual_partitioned/encdec_attention.py | import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.nn import Parameter
import math
from ..optimized.encdec_attention_func import encdec_attn_func
class MPEncdecMultiheadAttn(nn.Module):
"""Multi-headed encoder-decoder attention.
See "Attention Is All You Need" for more details.
... | 4,713 | 41.468468 | 113 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/multilingual_factorized/multilingual_adapters.py | # Implementation of the multilingual adapter as in Bapna et. al 2019
import torch
from torch.nn import Parameter
import torch.nn.functional as F
import math
from ..optimized.feed_forward import PositionWiseFeedForward
from ..layer_norm import LayerNorm
def xavier_normal(weight, gain=1.0):
fan_in, fan_out = weig... | 1,342 | 25.333333 | 96 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/multilingual_factorized/linear.py | import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.cuda.amp import autocast
class MultilingualLinear(torch.nn.Module):
def __init__(self, input_size, output_size, n_factors=1, rank=1,
use_multiplicative=False,
weight_drop=0.0, mfw_activation="none", n... | 9,046 | 35.926531 | 117 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/multilingual_factorized/relative_attention.py | import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.nn import Parameter
import math
from ..optimized.relative_self_attention_func import relative_self_attn_func
class MFWRelativeSelfMultiheadAttn(nn.Module):
"""Multi-headed attention.
See "Attention Is All You Need" for more detail... | 15,431 | 43.472622 | 117 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/multilingual_factorized/encdec_attention.py | import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.nn import Parameter
import math
from ..optimized.encdec_attention_func import encdec_attn_func
class MFWEncdecMultiheadAttn(nn.Module):
"""Multi-headed encoder-decoder attention.
See "Attention Is All You Need" for more details.
... | 9,527 | 45.028986 | 117 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/adaptive/feed_forward.py | import math
import torch
from torch import nn
from torch.nn import Parameter
import torch.nn.functional as F
from onmt.modules.dropout import variational_dropout
class AdaptiveFeedForward(nn.Module):
"""Multi-headed attention.
See "Attention Is All You Need" for more details.
"""
def __init__(self,... | 3,676 | 40.784091 | 109 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/adaptive/relative_self_attention.py | import math
import torch
from torch import nn
from torch.nn import Parameter
import torch.nn.functional as F
from ..optimized. relative_self_attention_func import relative_self_attn_func
if hasattr(torch._C, '_jit_set_profiling_executor'):
torch._C._jit_set_profiling_executor(False)
if hasattr(torch._C, '_jit_... | 4,993 | 42.051724 | 106 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/adaptive/encdec_attention.py | import math
import torch
from torch import nn
from torch.nn import Parameter
import torch.nn.functional as F
from .encdec_attention_func import encdec_attn_func
if hasattr(torch._C, '_jit_set_profiling_executor'):
torch._C._jit_set_profiling_executor(False)
if hasattr(torch._C, '_jit_set_profiling_mode'):
tor... | 4,479 | 40.481481 | 110 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/optimized/test_self_attention_bias_func.py | import torch
from torch import nn
from torch.nn import Parameter
import torch.nn.functional as F
from copy import deepcopy
from time import time
import unittest
import numpy as np
from self_attention_func import self_attn_func
from self_attention_attnbias_func import self_attn_bias_func
class Parameters(torch.nn.Mod... | 15,445 | 48.348243 | 124 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/optimized/attention_softmax.py | import torch
import torch.nn.functional as F
try:
from torch.cuda.amp import custom_fwd, custom_bwd
except (ModuleNotFoundError, ImportError) as e:
from .compat import custom_fwd, custom_bwd
try:
import mask_softmax_dropout_cuda
except (ModuleNotFoundError, ImportError) as e:
mask_softmax_dropout_cud... | 2,403 | 34.352941 | 115 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/optimized/dropout_add.py | import torch
import unittest
import numpy as np
from time import time
from torch.cuda.amp import custom_fwd, custom_bwd
try:
import fused_dropout_add_cuda
except (ModuleNotFoundError, ImportError) as e:
fused_dropout_add_cuda = None
#
# @torch.jit.script
# def jit_dropout_add(x, residual, prob, is_training):... | 6,838 | 35.572193 | 113 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/optimized/fused_clip_norm.py | # code is borrowed from NVIDIA Apex
# https://github.com/NVIDIA/apex/blob/master/apex/contrib/clip_grad/clip_grad.py
import torch
from torch._six import inf
from typing import Union, Iterable
from onmt.utils import clip_grad_norm
try:
import fused_optim
except (ModuleNotFoundError, ImportError) as e:
fused_op... | 4,710 | 33.137681 | 87 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/optimized/self_attention.py | import math
import torch
from torch import nn
from torch.nn import Parameter
import torch.nn.functional as F
from .self_attention_func import self_attn_func
from onmt.constants import double_precision
def rotate_half(x):
x1, x2 = x[..., :x.shape[-1] // 2], x[..., x.shape[-1] // 2:]
return torch.cat((-x2, x1... | 6,492 | 42.871622 | 116 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/optimized/encdec_attention_func_bias.py | """
Encoder-Decoder multi-head attention.
Code is heavily adapted from apex
https://github.com/NVIDIA/apex/tree/master/apex/contrib/csrc/multihead_attn
"""
import torch
import torch.nn.functional as F
try:
from torch.cuda.amp import custom_fwd, custom_bwd
except (ModuleNotFoundError, ImportError) as e:
from .... | 53,892 | 47.464928 | 128 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/optimized/feed_forward.py | import math
import torch
from torch import nn
from torch.nn import Parameter
import torch.nn.functional as F
from onmt.modules.dropout import variational_dropout, ReLUDropout
from onmt.modules.swish import SiLU
import onmt
from torch.cuda.amp import autocast
class AGELU(torch.nn.Module):
def forward(self, input... | 8,671 | 40.692308 | 110 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/optimized/self_attention_attnbias_func.py | """
Self-attention with multi-head attention.
Code is taken from apex self-attention implementation
https://github.com/NVIDIA/apex/tree/master/apex/contrib/csrc/multihead_attn
"""
import torch
import torch.nn.functional as F
try:
from torch.cuda.amp import custom_fwd, custom_bwd
except (ModuleNotFoundError, Impor... | 29,122 | 44.082043 | 127 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/optimized/test_rel_self_attention_func.py | import torch
from torch import nn
from torch.nn import Parameter
import torch.nn.functional as F
from copy import deepcopy
from time import time
import unittest
import numpy as np
import math
from self_attention_func import self_attn_func
from relative_self_attention_func import relative_self_attn_func
# Positional ... | 23,988 | 49.932059 | 112 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/optimized/linear.py | import torch
from torch import Tensor
try:
from torch.cuda.amp import custom_fwd, custom_bwd
except (ModuleNotFoundError, ImportError) as e:
from .compat import custom_fwd, custom_bwd
try:
import linear_blaslt
except (ModuleNotFoundError, ImportError) as e:
linear_blaslt = None
def _cast_if_autocast... | 5,012 | 25.109375 | 96 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/optimized/relative_self_attention_func.py | """
Self-attention with relative position encoding and multi-head attention.
Code is heavily adapted from apex self-attention implementation
https://github.com/NVIDIA/apex/tree/master/apex/contrib/csrc/multihead_attn
"""
import torch
import torch.nn.functional as F
try:
from torch.cuda.amp import custom_fwd, cust... | 43,985 | 46.862894 | 120 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/optimized/test_self_attention_func.py | import torch
from torch import nn
from torch.nn import Parameter
import torch.nn.functional as F
from copy import deepcopy
from time import time
import unittest
import numpy as np
from self_attention_func import self_attn_func
class Parameters(torch.nn.Module):
def __init__(self, model_size=16, heads=1):
... | 15,206 | 48.534202 | 111 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/optimized/softmax_xentropy.py | import torch
import xentropy_cuda
class SoftmaxCrossEntropyLoss(torch.autograd.Function):
@staticmethod
def forward(ctx, logits, labels, smoothing=0.0, padding_idx=0, half_to_float=False):
losses, max_log_sum_exp = xentropy_cuda.forward(
logits, labels, smoothing, half_to_float)
lo... | 1,023 | 34.310345 | 88 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/optimized/test_encdec_attention_func.py | import torch
from torch import nn
from torch.nn import Parameter
import torch.nn.functional as F
from copy import deepcopy
from time import time
import unittest
import numpy as np
from encdec_attention_func_bias import encdec_attn_bias_func
class Parameters(torch.nn.Module):
def __init__(self, model_size=16, he... | 22,617 | 52.980907 | 116 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/optimized/compat.py | import torch
import functools
def custom_fwd(fwd=None, **kwargs):
"""
Helper decorator for ``forward`` methods of custom autograd functions (subclasses of
:class:`torch.autograd.Function`). See the :ref:`example page<amp-custom-examples>` for more detail.
Arguments:
cast_inputs (:class:`torc... | 2,177 | 31.507463 | 106 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/optimized/fused_adam.py | import torch
class MultiTensorApply(object):
available = False
warned = False
def __init__(self, chunk_size):
try:
import fused_optim
MultiTensorApply.available = True
self.chunk_size = chunk_size
except ImportError as err:
MultiTensorApply.... | 8,746 | 44.557292 | 145 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/optimized/encdec_attention_func.py | """
Encoder-Decoder multi-head attention.
Code is heavily adapted from apex
https://github.com/NVIDIA/apex/tree/master/apex/contrib/csrc/multihead_attn
"""
import torch
import torch.nn.functional as F
try:
from torch.cuda.amp import custom_fwd, custom_bwd
except (ModuleNotFoundError, ImportError) as e:
from .... | 36,308 | 46.964333 | 120 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/optimized/fast_mha.py | ###############################################################################
# Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistribution... | 7,225 | 40.768786 | 118 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/optimized/flash_mha.py | ###############################################################################
# Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistribution... | 11,249 | 44.731707 | 101 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/optimized/rotary_encodings.py | import torch
import torch.nn.functional as F
from einops import rearrange, repeat
class SinusoidalEmbeddings(torch.nn.Module):
def __init__(self, dim):
super().__init__()
inv_freq = 1. / (10000 ** (torch.arange(0, dim, 2).float() / dim))
self.register_buffer('inv_freq', inv_freq)
def ... | 1,081 | 30.823529 | 80 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/optimized/relative_self_attention.py | import math
import torch
from torch import nn
from torch.nn import Parameter
import torch.nn.functional as F
from .relative_self_attention_func import relative_self_attn_func
from .relative_self_attention_func import RelativeShift
import onmt
class RelativeSelfMultiheadAttn(nn.Module):
"""Multi-headed attention... | 10,003 | 42.307359 | 119 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/optimized/self_attention_func.py | """
Self-attention with multi-head attention.
Code is taken from apex self-attention implementation
https://github.com/NVIDIA/apex/tree/master/apex/contrib/csrc/multihead_attn
"""
import torch
import torch.nn.functional as F
try:
from torch.cuda.amp import custom_fwd, custom_bwd
except (ModuleNotFoundError, Impor... | 43,104 | 44.421496 | 130 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/optimized/encdec_attention.py | import math
import torch
from torch import nn
from torch.nn import Parameter
import torch.nn.functional as F
from .encdec_attention_func import encdec_attn_func
import onmt
class EncdecMultiheadAttn(nn.Module):
"""Multi-headed encoder-decoder attention.
See "Attention Is All You Need" for more details.
""... | 6,634 | 41.261146 | 111 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/extensions/setup.py | import torch
from torch.utils import cpp_extension
from setuptools import setup, find_packages
import subprocess
import sys
import warnings
import os
from torch.utils.cpp_extension import CUDAExtension
from torch.utils.cpp_extension import BuildExtension
from torch.utils.cpp_extension import CUDA_HOME
# ninja build ... | 17,708 | 51.862687 | 122 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/extensions/setup_base.py | import torch
from torch.utils import cpp_extension
from setuptools import setup, find_packages
import subprocess
import sys
import warnings
import os
from torch.utils.cpp_extension import CUDAExtension
from torch.utils.cpp_extension import BuildExtension
from torch.utils.cpp_extension import CUDA_HOME
# ninja build ... | 15,451 | 50.165563 | 122 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/extensions/flashattn/setup.py | import torch
from torch.utils import cpp_extension
from setuptools import setup, find_packages
import subprocess
from pathlib import Path
import sys
import warnings
import os
from torch.utils.cpp_extension import CUDAExtension
from torch.utils.cpp_extension import BuildExtension
from torch.utils.cpp_extension import ... | 6,031 | 34.904762 | 101 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/extensions/mlp_blaslt/test_fused_dense.py | from copy import copy, deepcopy
import math
import torch
from torch import nn
import torch.nn.functional as F
import unittest
from time import time
import numpy as np
import random
try:
from torch.cuda.amp import custom_fwd, custom_bwd
except (ModuleNotFoundError, ImportError) as e:
from ..optimized.compat imp... | 20,463 | 37.466165 | 122 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/extensions/mlp_blaslt/setup.py | import torch
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension, CUDA_HOME
from setuptools import setup, find_packages
import subprocess
import sys
import warnings
import os
# ninja build does not work unless include_dirs are abs path
this_dir = os.path.dirname(os.path.abspath(__file__)... | 5,888 | 45.738095 | 277 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/extensions/mlp_blaslt/test_linear.py | from copy import copy, deepcopy
import math
import torch
from torch import nn
import torch.nn.functional as F
import unittest
from time import time
import numpy as np
import random
from torch import Tensor
try:
from torch.cuda.amp import custom_fwd, custom_bwd
except (ModuleNotFoundError, ImportError) as e:
fr... | 11,042 | 36.181818 | 119 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/extensions/blru/blru.py | import sys
python = sys.argv[1]=="0"
import time
if not python:
from torch.utils.cpp_extension import load
blru = load(name="blru", sources=["blru.cpp","blru_kernel.cu"]) #, verbose=True)
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import math
from torch.cuda.amp imp... | 6,085 | 34.383721 | 113 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/extensions/multihead_attn/setup.py | import torch
from torch.utils import cpp_extension
from setuptools import setup, find_packages
import subprocess
import sys
import warnings
import os
from torch.utils.cpp_extension import CUDAExtension
from torch.utils.cpp_extension import BuildExtension
def get_cuda_bare_metal_version(cuda_dir):
raw_output = s... | 5,186 | 42.588235 | 98 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/extensions/layer_norm/test_layer_norm.py | from copy import copy, deepcopy
import math
import torch
from torch import nn
import torch.nn.functional as F
import unittest
from time import time
import numpy as np
import random
import fast_layer_norm_cuda
def _cast_if_autocast_enabled(*args):
if not torch.is_autocast_enabled():
return args
else:... | 7,215 | 31.071111 | 130 | py |