| repo (string) | file (string) | code (string) | file_length (int64) | avg_line_length (float64) | max_line_length (int64) | extension_type (string) |
|---|---|---|---|---|---|---|
TokenMixup | TokenMixup-main/experiments/apex_copy/tests/L0/run_amp/test_fused_sgd.py | import unittest
import functools as ft
import itertools as it
from apex import amp
from apex.amp import _amp_state
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn import Parameter
from utils import common_init, HALF, FLOAT,\
ALWAYS_HALF, ALWAYS_FLOAT, MATCH_INPUT
try:
import a... | 39,261 | 48.386164 | 261 | py |
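The preview above is truncated; for orientation, a minimal sketch (not the test itself) of the amp + FusedSGD pattern these tests exercise, assuming a CUDA build of apex with the C++/CUDA extensions:

```python
import torch
from apex import amp
from apex.optimizers import FusedSGD

model = torch.nn.Linear(16, 4).cuda()
optimizer = FusedSGD(model.parameters(), lr=0.1, momentum=0.9)

# "O2" casts the model to fp16 while amp keeps fp32 master weights.
model, optimizer = amp.initialize(model, optimizer, opt_level="O2")

loss = model(torch.randn(8, 16, device="cuda")).float().sum()
with amp.scale_loss(loss, optimizer) as scaled_loss:
    scaled_loss.backward()
optimizer.step()
```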
TokenMixup | TokenMixup-main/experiments/apex_copy/tests/L0/run_amp/test_promotion.py | import unittest
import itertools as it
from apex import amp
import torch
from torch import nn
import torch.nn.functional as F
from utils import common_init, HALF, FLOAT, DTYPES
class TestPromotion(unittest.TestCase):
def setUp(self):
self.handle = amp.init(enabled=True)
common_init(self)
de... | 2,558 | 32.671053 | 73 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/tests/L0/run_amp/test_multiple_models_optimizers_losses.py | import unittest
import functools as ft
import itertools as it
from apex import amp
from apex.amp import _amp_state
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn import Parameter
from utils import common_init, HALF, FLOAT,\
ALWAYS_HALF, ALWAYS_FLOAT, MATCH_INPUT
class MyModel(to... | 36,499 | 46.837484 | 255 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/tests/L0/run_amp/utils.py | import torch
HALF = 'torch.cuda.HalfTensor'
FLOAT = 'torch.cuda.FloatTensor'
DTYPES = [torch.half, torch.float]
ALWAYS_HALF = {torch.float: HALF,
torch.half: HALF}
ALWAYS_FLOAT = {torch.float: FLOAT,
torch.half: FLOAT}
MATCH_INPUT = {torch.float: FLOAT,
torch.half: HALF}... | 512 | 22.318182 | 57 | py |
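The maps above are essentially the whole file: they translate an input dtype into the CUDA tensor-type string an amp-wrapped op is expected to return. A hypothetical usage sketch (the `y = x * 2` op is illustrative, not taken from the tests):

```python
import torch

HALF = 'torch.cuda.HalfTensor'
FLOAT = 'torch.cuda.FloatTensor'
MATCH_INPUT = {torch.float: FLOAT, torch.half: HALF}

x = torch.randn(4, device='cuda', dtype=torch.half)
y = x * 2  # an op that amp leaves in the input precision
assert y.type() == MATCH_INPUT[x.dtype]
```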
TokenMixup | TokenMixup-main/experiments/apex_copy/tests/L0/run_amp/test_rnn.py | import unittest
from apex import amp
import random
import torch
from torch import nn
from utils import common_init, HALF
class TestRnnCells(unittest.TestCase):
def setUp(self):
self.handle = amp.init(enabled=True)
common_init(self)
def tearDown(self):
self.handle._deactivate()
d... | 4,506 | 37.521368 | 87 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/tests/L0/run_amp/test_add_param_group.py | import unittest
import functools as ft
import itertools as it
from apex import amp
from apex.amp import _amp_state
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn import Parameter
from utils import common_init, HALF, FLOAT,\
ALWAYS_HALF, ALWAYS_FLOAT, MATCH_INPUT
class MyModel(to... | 5,831 | 38.14094 | 102 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/tests/L0/run_amp/test_checkpointing.py | import unittest
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from apex import amp
from utils import common_init, FLOAT
class MyModel(torch.nn.Module):
def __init__(self):
super(MyModel, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 3, 1, 1)
... | 11,836 | 43.16791 | 107 | py |
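For context, the checkpointing flow these tests cover follows the pattern from the apex amp documentation: amp's loss-scaler state is saved and restored alongside the model and optimizer. A minimal sketch:

```python
import torch
from apex import amp

model = torch.nn.Linear(4, 4).cuda()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
model, optimizer = amp.initialize(model, optimizer, opt_level="O1")

checkpoint = {
    "model": model.state_dict(),
    "optimizer": optimizer.state_dict(),
    "amp": amp.state_dict(),  # loss-scale state
}
torch.save(checkpoint, "amp_checkpoint.pt")

# Restore: amp.initialize must be called again (same opt_level) before loading.
checkpoint = torch.load("amp_checkpoint.pt")
model.load_state_dict(checkpoint["model"])
optimizer.load_state_dict(checkpoint["optimizer"])
amp.load_state_dict(checkpoint["amp"])
```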
TokenMixup | TokenMixup-main/experiments/apex_copy/tests/L0/run_amp/test_multi_tensor_l2norm.py | import unittest
import functools as ft
import itertools as it
from apex import amp
import torch
from torch import nn
import torch.nn.functional as F
from utils import common_init, HALF, FLOAT,\
ALWAYS_HALF, ALWAYS_FLOAT, MATCH_INPUT
try:
import amp_C
from amp_C import multi_tensor_l2norm
from apex.multi_t... | 2,719 | 29.909091 | 100 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/tests/L0/run_fused_layer_norm/test_fused_layer_norm.py | import torch
from apex.normalization import FusedLayerNorm
from apex.normalization import FusedRMSNorm
from apex.normalization import MixedFusedLayerNorm
from apex.normalization import MixedFusedRMSNorm
from torch.testing._internal import common_utils
from torch.testing._internal.common_device_type import instantiate... | 13,662 | 48.864964 | 121 | py |
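A minimal usage sketch for the first import above: FusedLayerNorm is a drop-in replacement for torch.nn.LayerNorm on CUDA tensors (assumes apex built with --cuda_ext), and the test file compares fused and reference outputs in roughly this way:

```python
import torch
from apex.normalization import FusedLayerNorm

x = torch.randn(8, 128, device="cuda")
fused = FusedLayerNorm(128).cuda()
reference = torch.nn.LayerNorm(128).cuda()
reference.load_state_dict(fused.state_dict())  # identical affine parameters

torch.testing.assert_close(fused(x), reference(x), rtol=1e-5, atol=1e-5)
```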
TokenMixup | TokenMixup-main/experiments/apex_copy/tests/L0/run_fp16util/test_fp16util.py | import unittest
import torch
import torch.nn as nn
from apex.fp16_utils import FP16Model
class DummyBlock(nn.Module):
def __init__(self):
super(DummyBlock, self).__init__()
self.conv = nn.Conv2d(10, 10, 2)
self.bn = nn.BatchNorm2d(10, affine=True)
def forward(self, x):
retu... | 2,051 | 26 | 86 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/tests/L0/run_transformer/test_layers.py | import logging
import unittest
import typing
import torch
import torch.nn as nn
from torch.testing._internal import common_utils
from apex.transformer import parallel_state
from apex.transformer.tensor_parallel import layers
from apex.transformer.testing.commons import set_random_seed
from apex.transformer.testing.di... | 24,261 | 42.247772 | 122 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/tests/L0/run_transformer/test_fused_softmax.py | """Test for fused softmax functions.
Ref: https://github.com/NVIDIA/Megatron-LM/blob/40becfc96c4144985458ac0e0fae45dbb111fbd2/megatron/fused_kernels/tests/test_fused_kernels.py
""" # NOQA
import itertools
import torch
from torch.testing._internal import common_utils
from apex.transformer import AttnMaskType
from ap... | 11,330 | 38.618881 | 139 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/tests/L0/run_transformer/test_data.py | import logging
import torch.testing
from torch.testing._internal import common_utils
logging.getLogger("torch").setLevel(logging.WARNING)
from apex.transformer import parallel_state
from apex.transformer.tensor_parallel import data as data_utils
from apex.transformer.testing.distributed_test_base import NcclDistribu... | 2,165 | 32.323077 | 82 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/tests/L0/run_transformer/run_dynamic_batchsize_test.py | from typing import Tuple, List
import torch
from apex.transformer import parallel_state
from apex.transformer.pipeline_parallel.utils import get_num_microbatches
from apex.transformer.pipeline_parallel.schedules.common import (
_get_params_for_weight_decay_optimization,
)
from apex.transformer.pipeline_parallel.s... | 7,505 | 35.975369 | 107 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/tests/L0/run_transformer/run_bert_minimal_test.py | import random
import torch
try:
import torch_ucc
except ImportError:
HAS_TORCH_UCC = False
else:
HAS_TORCH_UCC = True
print("Use UCC as backend of Pipeline Parallel ProcessGroups")
from apex.transformer.enums import ModelType
from apex.transformer import tensor_parallel
from apex.transformer import par... | 9,722 | 36.252874 | 243 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/tests/L0/run_transformer/test_parallel_state.py | import logging
import os
from torch.testing._internal import common_utils
logging.getLogger("torch").setLevel(logging.WARNING)
from apex.transformer import parallel_state
from apex.transformer.testing.distributed_test_base import NcclDistributedTestBase
from apex.transformer.testing.distributed_test_base import UccD... | 7,525 | 39.031915 | 93 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/tests/L0/run_transformer/test_transformer_utils.py | import logging
import torch
from torch.testing._internal import common_utils
logging.getLogger("torch").setLevel(logging.WARNING)
from apex.transformer import parallel_state
from apex.transformer.tensor_parallel import utils
from apex.transformer.testing.distributed_test_base import NcclDistributedTestBase
logging.... | 1,395 | 33.04878 | 92 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/tests/L0/run_transformer/test_mapping.py | import logging
import torch
from torch.testing._internal import common_utils
from apex.transformer import parallel_state
from apex.transformer.tensor_parallel import mappings
from apex.transformer.testing.distributed_test_base import NcclDistributedTestBase
from apex.transformer.testing.distributed_test_base import U... | 3,406 | 37.715909 | 92 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/tests/L0/run_transformer/test_cross_entropy.py | import logging
from typing import Tuple
import torch
import torch.nn.functional as F
from torch.testing._internal import common_utils
logging.getLogger("torch").setLevel(logging.WARNING)
from apex.transformer import parallel_state
from apex.transformer import tensor_parallel
from apex.transformer.tensor_parallel imp... | 3,762 | 37.397959 | 118 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/tests/L0/run_transformer/test_random.py | import logging
import torch
from torch.testing._internal import common_utils
logging.getLogger("torch").setLevel(logging.WARNING)
from apex.transformer import parallel_state
from apex.transformer import tensor_parallel
from apex.transformer.testing.distributed_test_base import NcclDistributedTestBase
from apex.trans... | 4,445 | 36.677966 | 89 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/tests/L0/run_transformer/test_transformer_module.py | from typing import Tuple
import os
import subprocess
import sys
import unittest
# TODO(crcrpar): should move this to `apex._testing` or whatever in the near future.
RUN_SLOW_TESTS = os.getenv('APEX_RUN_WITH_SLOW_TESTS', '0') == '1'
SEVERALGPU_TEST = [
"bert_minimal_test",
"gpt_minimal_test",
"dynamic_batc... | 3,168 | 31.336735 | 147 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/tests/L0/run_transformer/test_microbatches.py | import logging
from typing import List, Optional
from torch.testing._internal import common_utils
logging.getLogger("torch").setLevel(logging.WARNING)
from apex.transformer import parallel_state
from apex.transformer.pipeline_parallel.utils import (
_reconfigure_microbatch_calculator,
get_micro_batch_size,
... | 3,526 | 40.011628 | 138 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/tests/L0/run_transformer/test_p2p_comm.py | import logging
import unittest
import torch
from torch.testing._internal import common_utils
logging.getLogger("torch").setLevel(logging.WARNING)
from apex.transformer import parallel_state
from apex.transformer.pipeline_parallel import p2p_communication
from apex.transformer.testing.distributed_test_base import Ncc... | 4,671 | 36.98374 | 112 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/tests/L0/run_transformer/test_pipeline_parallel_fwd_bwd.py | import contextlib
import logging
import itertools
import re
from typing import Optional, Tuple, List
import unittest
import torch
from torch.testing._internal import common_utils
from torch.testing._internal import common_cuda
from apex._autocast_utils import _get_autocast_dtypes
from apex.transformer import parallel... | 23,872 | 42.014414 | 158 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/tests/L0/run_transformer/gpt_scaling_test.py | import subprocess
import os
from apex.transformer.testing.commons import TEST_SUCCESS_MESSAGE
def run_gpt(cmd):
args = list(cmd.split(" "))
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
outs, errs = p.communicate()
outs = list(str((outs).decode("utf-8")).splitlines())
... | 3,890 | 32.25641 | 102 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/tests/L0/run_transformer/run_gpt_minimal_test.py | from functools import partial
from typing import List
import time
import torch
try:
import torch_ucc
except ImportError:
HAS_TORCH_UCC = False
else:
HAS_TORCH_UCC = True
print("Use UCC as backend of Pipeline Parallel ProcessGroups")
from apex.transformer import parallel_state
from apex.transformer.enu... | 8,592 | 37.361607 | 243 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/tests/L0/run_transformer/test_batch_sampler.py | import torch
from torch.testing._internal import common_utils
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
from apex.transformer.pipeline_parallel.utils import _split_batch_into_microbatch as split_batch_into_microbatch
class MyIterableDataset(Dataset):
def __init__(self, start, e... | 5,464 | 38.601449 | 132 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/tests/L0/run_mlp/test_mlp.py | """Tests for c++ MLP"""
from itertools import product
from time import time
import torch
from torch import nn
from torch.testing._internal import common_utils
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_device_type import onlyCUDA
from apex.... | 7,263 | 34.091787 | 93 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/tests/distributed/amp_master_params/amp_master_params.py | import torch
import argparse
import os
from apex import amp
# FOR DISTRIBUTED: (can also use torch.nn.parallel.DistributedDataParallel instead)
from apex.parallel import DistributedDataParallel
parser = argparse.ArgumentParser()
# FOR DISTRIBUTED: Parse for the local_rank argument, which will be supplied
# automatica... | 2,799 | 38.43662 | 103 | py |
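A condensed sketch of the setup this script performs, assuming launch via `python -m torch.distributed.launch --nproc_per_node=2 ...` (which supplies --local_rank automatically):

```python
import argparse
import torch
from apex import amp
from apex.parallel import DistributedDataParallel

parser = argparse.ArgumentParser()
parser.add_argument("--local_rank", default=0, type=int)
args = parser.parse_args()

torch.cuda.set_device(args.local_rank)
torch.distributed.init_process_group(backend="nccl", init_method="env://")

model = torch.nn.Linear(8, 8).cuda()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
# Order matters: amp.initialize first, then wrap with apex DDP.
model, optimizer = amp.initialize(model, optimizer, opt_level="O2")
model = DistributedDataParallel(model)
```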
TokenMixup | TokenMixup-main/experiments/apex_copy/tests/distributed/amp_master_params/compare.py | import torch
model_params_rank0 = torch.load("rank0model.pth",
map_location = lambda storage, loc: storage.cuda(0))
model_params_rank1 = torch.load("rank1model.pth",
map_location = lambda storage, loc: storage.cuda(0))
master_params_rank0 = torch.load("rank0m... | 1,531 | 51.827586 | 104 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/tests/distributed/synced_batchnorm/two_gpu_test_different_batch_size.py | import torch
import torch.nn as nn
from torch.nn.parallel import DistributedDataParallel as DDP
from apex.parallel import SyncBatchNorm as ApexSyncBatchNorm
import argparse
import os
import numpy as np
var_batch = 16
def compare(desc, inp1, inp2, error= 1e-5):
a = inp1.clone().detach().cpu().numpy()
b = inp2... | 5,322 | 32.477987 | 99 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/tests/distributed/synced_batchnorm/test_batchnorm1d.py | import torch
import apex
model = apex.parallel.SyncBatchNorm(4).cuda()
model.weight.data.uniform_()
model.bias.data.uniform_()
data = torch.rand((8,4)).cuda()
model_ref = torch.nn.BatchNorm1d(4).cuda()
model_ref.load_state_dict(model.state_dict())
data_ref = data.clone()
output = model(data)
output_ref = model_ref(d... | 484 | 24.526316 | 59 | py |
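The snippet above is nearly the whole file; presumably the truncated remainder compares the two outputs. A self-contained sketch of that equivalence check: on a single GPU with no process group, apex.parallel.SyncBatchNorm should match torch.nn.BatchNorm1d:

```python
import torch
import apex

sync_bn = apex.parallel.SyncBatchNorm(4).cuda()
ref_bn = torch.nn.BatchNorm1d(4).cuda()
ref_bn.load_state_dict(sync_bn.state_dict())  # same affine params and stats

data = torch.rand(8, 4, device="cuda")
torch.testing.assert_close(sync_bn(data), ref_bn(data.clone()))
```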
TokenMixup | TokenMixup-main/experiments/apex_copy/tests/distributed/synced_batchnorm/test_groups.py | import torch
import numpy as np
import apex
import syncbn
import os
import argparse
import torch.optim as optim
def compare(desc, inp1, inp2, error):
a = inp1.clone().detach().cpu().numpy()
b = inp2.clone().detach().cpu().numpy()
close = np.allclose(a,b, error, error)
if not close:
print(desc, ... | 6,690 | 34.973118 | 209 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/tests/distributed/synced_batchnorm/single_gpu_unit_test.py | import torch
import numpy as np
import apex
if True:
print("using setup tools")
import syncbn
else:
print("using jit")
from torch.utils.cpp_extension import load
syncbn = load(name='syncbn', sources=['../../csrc/syncbn.cpp', '../../csrc/welford.cu'])
def compare(desc, inp1, inp2, error):
a = in... | 7,208 | 44.05625 | 209 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/tests/distributed/synced_batchnorm/two_gpu_unit_test.py | import torch
import numpy as np
import apex
import syncbn
import os
import argparse
import torch.optim as optim
def compare(desc, inp1, inp2, error):
a = inp1.clone().detach().cpu().numpy()
b = inp2.clone().detach().cpu().numpy()
close = np.allclose(a,b, error, error)
if not close:
print(desc, ... | 6,616 | 35.558011 | 209 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/tests/distributed/synced_batchnorm/python_single_gpu_unit_test.py | import torch
import numpy as np
import apex
def compare(desc, inp1, inp2, error):
a = inp1.clone().detach().cpu().numpy()
b = inp2.clone().detach().cpu().numpy()
close = np.allclose(a,b, error, error)
if not close:
print(desc, close)
z = a - b
index = (np.abs(z) >= error + error... | 4,004 | 34.758929 | 209 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/tests/distributed/DDP/ddp_race_condition_test.py | import torch
import torch.distributed as dist
from torch.nn import Parameter
from torch.nn import Module
from apex.parallel import DistributedDataParallel as DDP
import argparse
import os
parser = argparse.ArgumentParser(description='allreduce hook example')
parser.add_argument("--local_rank", default=0, type=int)
ar... | 2,424 | 33.642857 | 95 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/tests/L1/common/compare.py | import argparse
import torch
parser = argparse.ArgumentParser(description='Compare')
parser.add_argument('--opt-level', type=str)
parser.add_argument('--keep-batchnorm-fp32', type=str, default=None)
parser.add_argument('--loss-scale', type=str, default=None)
parser.add_argument('--fused-adam', action='store_true')
par... | 2,231 | 33.338462 | 101 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/tests/L1/common/main_amp.py | import argparse
import os
import shutil
import time
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvi... | 19,169 | 35.375712 | 235 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/tests/L1/transformer/pipeline_parallel_fwd_bwd_ucc_async.py | import os
import logging
import itertools
from typing import Optional, Tuple, List
import unittest
import torch
from torch.testing._internal import common_utils
from torch.testing._internal import common_cuda
from torch.testing._internal import common_distributed
from apex._autocast_utils import _get_autocast_dtypes
... | 9,570 | 42.504545 | 158 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/docs/source/conf.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# PyTorch documentation build configuration file, created by
# sphinx-quickstart on Fri Dec 23 13:31:47 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# au... | 8,119 | 31.610442 | 79 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/apex/_autocast_utils.py | from typing import Optional, Sequence
import torch
__all__ = ["_cast_if_autocast_enabled"]
def _get_autocast_dtypes() -> Sequence[torch.dtype]:
if torch.cuda.is_bf16_supported():
return [torch.half, torch.bfloat16]
return [torch.half]
def _get_current_dtype(dtype: Optional[torch.dtype] = None) ->... | 664 | 23.62963 | 87 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/apex/__init__.py | import logging
import warnings
# May help avoid undefined symbol errors https://pytorch.org/cppdocs/notes/faq.html#undefined-symbol-errors-from-pytorch-aten
import torch
if torch.distributed.is_available():
from . import parallel
from . import amp
from . import fp16_utils
# For optimizers and normalization the... | 2,034 | 38.134615 | 170 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/apex/amp/scaler.py | import torch
from ..multi_tensor_apply import multi_tensor_applier
from ._amp_state import _amp_state, master_params, maybe_print
from itertools import product
def scale_check_overflow_python(model_grad, master_grad, scale, check_overflow=False):
# Exception handling for 18.04 compatibility
if check_overflow:
... | 10,494 | 47.142202 | 110 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/apex/amp/_process_optimizer.py | import types
from ..fp16_utils import master_params_to_model_params
from ..multi_tensor_apply import multi_tensor_applier
from ._amp_state import maybe_print
import torch
from ..optimizers import FusedSGD
class AmpOptimizerState(object):
def __init__(self):
pass
def _master_params_to_model_params(self):... | 20,747 | 41.342857 | 115 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/apex/amp/_amp_state.py | # This is a "header object" that allows different amp modules to communicate.
# I'm a C++ guy, not a python guy. I decided this approach because it seemed most C++-like.
# But apparently it's ok:
# http://effbot.org/pyfaq/how-do-i-share-global-variables-across-modules.htm
import os
import torch
TORCH_MAJOR = int(torc... | 2,008 | 27.7 | 92 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/apex/amp/utils.py | from . import compat
import functools
import itertools
import torch
def is_cuda_enabled():
return torch.version.cuda is not None
def get_cuda_version():
return tuple(int(x) for x in torch.version.cuda.split('.'))
def is_fp_tensor(x):
if is_nested(x):
# Fast-fail version of all(is_fp_tensor)
... | 7,222 | 33.232227 | 86 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/apex/amp/opt.py | import contextlib
import warnings
from .scaler import LossScaler, master_params
from ._amp_state import maybe_print
import numpy as np
class OptimWrapper(object):
def __init__(self, optimizer, amp_handle, num_loss):
self._optimizer = optimizer
self._amp_handle = amp_handle
self._num_loss ... | 3,446 | 32.144231 | 80 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/apex/amp/amp.py | from . import compat, rnn_compat, utils, wrap
from .handle import AmpHandle, NoOpHandle
from .lists import functional_overrides, torch_overrides, tensor_overrides
from ._amp_state import _amp_state
from .frontend import *
import functools
import itertools
import torch
_DECORATOR_HANDLE = None
_USER_CAST_REGISTRY = ... | 7,266 | 39.825843 | 101 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/apex/amp/compat.py | import torch
# True for post-0.4, when Variables/Tensors merged.
def variable_is_tensor():
v = torch.autograd.Variable()
return isinstance(v, torch.Tensor)
def tensor_is_variable():
x = torch.Tensor()
return type(x) == torch.autograd.Variable
# False for post-0.4
def tensor_is_float_tensor():
x =... | 1,393 | 28.659574 | 77 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/apex/amp/wrap.py | from . import compat
from . import utils
from ._amp_state import _amp_state
from . import rnn_compat
import functools
import torch
def make_cast_wrapper(orig_fn, cast_fn, handle,
try_caching=False):
@functools.wraps(orig_fn)
def wrapper(*args, **kwargs):
if not handle.is_active(... | 11,242 | 39.588448 | 89 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/apex/amp/_initialize.py | import torch
from torch._six import string_classes
import functools
import numpy as np
import sys
from types import MethodType
import warnings
from ._amp_state import _amp_state, warn_or_err, container_abcs
from .handle import disable_casts
from .scaler import LossScaler
from ._process_optimizer import _process_optimiz... | 11,606 | 42.965909 | 111 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/apex/amp/frontend.py | import torch
from ._initialize import _initialize
from ._amp_state import _amp_state, warn_or_err, maybe_print
from collections import OrderedDict
class Properties(object):
"""
This class has two purposes: to establish a set of default properties,
and to route setting of these attributes through __setattr... | 21,267 | 47.009029 | 115 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/apex/amp/rnn_compat.py | from . import utils, wrap
import torch
_VF = torch._C._VariableFunctions
RNN_NAMES = ['rnn_relu', 'rnn_tanh', 'gru', 'lstm']
def _gen_VF_wrapper(name):
def wrapper(*args, **kwargs):
return getattr(_VF, name)(*args, **kwargs)
return wrapper
# Some python magic to generate an object that has the rnn ce... | 1,995 | 35.962963 | 79 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/apex/amp/handle.py | import contextlib
import warnings
import sys
import torch
from . import utils
from .opt import OptimWrapper
from .scaler import LossScaler
from ._amp_state import _amp_state, master_params, maybe_print
if torch.distributed.is_available():
from ..parallel.LARC import LARC
# There's no reason to expose the notion... | 12,066 | 41.79078 | 118 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/apex/amp/lists/functional_overrides.py |
# TODO: think about the following two. They do weird things.
# - torch.nn.utils.clip_grad (but it should always be fp32 anyway)
# - torch.nn.utils.weight_norm
# Notes:
# F.instance_norm uses batch_norm internally. Which correctly handles
# fp16 in/out with fp32 weights. So we shouldn't do anything for
# either of... | 2,248 | 26.765432 | 96 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/apex/amp/lists/tensor_overrides.py | from .. import compat
from . import torch_overrides
import importlib
import torch
# if compat.variable_is_tensor() and not compat.tensor_is_variable():
MODULE = torch.Tensor
# else:
# MODULE = torch.autograd.Variable
FP16_FUNCS = compat.filter_attrs(MODULE, [
'__matmul__',
])
FP32_FUNCS = compat.filter_at... | 1,402 | 20.921875 | 72 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/apex/amp/lists/torch_overrides.py | import torch
from .. import utils
MODULE = torch
FP16_FUNCS = [
# Low level functions wrapped by torch.nn layers.
# The wrapper layers contain the weights which are then passed in as a parameter
# to these functions.
'conv1d',
'conv2d',
'conv3d',
'conv_transpose1d',
'conv_transpose2d'... | 2,082 | 16.956897 | 84 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/apex/fused_dense/fused_dense.py | import torch
from torch import nn
import fused_dense_cuda
from apex._autocast_utils import _cast_if_autocast_enabled
#implements fused GEMM+bias in forward pass using mlp_cuda from apex
class FusedDenseFunc(torch.autograd.Function):
@staticmethod
def forward(ctx, input, weight, bias):
ctx.save_for_back... | 4,078 | 41.489583 | 173 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/apex/normalization/fused_layer_norm.py | import importlib
import numbers
import torch
from torch.nn.parameter import Parameter
from torch.nn import init
from torch.nn import functional as F
from apex._autocast_utils import _cast_if_autocast_enabled
global fused_layer_norm_cuda
fused_layer_norm_cuda = None
# Reference implementation from Huggingface
def m... | 18,213 | 40.584475 | 114 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/apex/fp16_utils/fp16_optimizer.py | import torch
from torch import nn
from torch.autograd import Variable
from torch.nn.parameter import Parameter
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
from ..amp._amp_state import _amp_state, maybe_print
from ..amp.scaler import LossScaler
from ..multi_tensor_apply import multi_tensor... | 27,769 | 49.036036 | 425 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/apex/fp16_utils/fp16util.py | import torch
import torch.nn as nn
from torch.autograd import Variable
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
class tofp16(nn.Module):
"""
Utility module that implements::
def forward(self, input):
return input.half()
"""
def __init__(self):
... | 7,141 | 36.989362 | 337 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/apex/fp16_utils/loss_scaler.py | import torch
# item() is a recent addition, so this helps with backward compatibility.
def to_python_float(t):
if hasattr(t, 'item'):
return t.item()
else:
return t[0]
class LossScaler:
"""
Class that manages a static loss scale. This class is intended to interact with
:class:`FP1... | 7,568 | 39.475936 | 326 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/apex/parallel/multiproc.py | import torch
import sys
import subprocess
def docstring_hack():
"""
Multiproc file which will launch a set of processes locally for multi-gpu
usage: python -m apex.parallel.multiproc main.py ...
"""
pass
argslist = list(sys.argv)[1:]
world_size = torch.cuda.device_count()
if '--world-size' in arg... | 884 | 23.583333 | 77 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/apex/parallel/optimized_sync_batchnorm.py | import torch
from torch.nn.modules.batchnorm import _BatchNorm
from torch.nn import functional as F
import syncbn
from .optimized_sync_batchnorm_kernel import SyncBatchnormFunction
class SyncBatchNorm(_BatchNorm):
"""
synchronized batch normalization module extented from `torch.nn.BatchNormNd`
with the a... | 4,364 | 49.755814 | 252 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/apex/parallel/optimized_sync_batchnorm_kernel.py | import torch
from torch.autograd.function import Function
import syncbn
from apex.parallel import ReduceOp
class SyncBatchnormFunction(Function):
@staticmethod
def forward(ctx, input, z, weight, bias, running_mean, running_variance, eps, track_running_stats = True, momentum = 1.0, process_group = None, chann... | 5,467 | 44.566667 | 189 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/apex/parallel/LARC.py | import torch
from torch import nn
from torch.nn.parameter import Parameter
class LARC(object):
"""
:class:`LARC` is a pytorch implementation of both the scaling and clipping variants of LARC,
in which the ratio between gradient and parameter magnitudes is used to calculate an adaptive
local learning r... | 4,018 | 36.212963 | 225 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/apex/parallel/distributed.py | import torch
import torch.distributed as dist
from torch.nn.modules import Module
from torch.autograd import Variable
from collections import OrderedDict
from itertools import chain
import copy
import importlib
from ..multi_tensor_apply import multi_tensor_applier
imported_flatten_impl = False
def import_flatten_impl... | 30,651 | 46.89375 | 496 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/apex/parallel/__init__.py | import torch
if hasattr(torch.distributed, 'ReduceOp'):
ReduceOp = torch.distributed.ReduceOp
elif hasattr(torch.distributed, 'reduce_op'):
ReduceOp = torch.distributed.reduce_op
else:
ReduceOp = torch.distributed.deprecated.reduce_op
from .distributed import DistributedDataParallel, Reducer
# This is tri... | 3,667 | 37.208333 | 162 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/apex/parallel/sync_batchnorm.py | import torch
from torch.nn.modules.batchnorm import _BatchNorm
from torch.nn import functional as F
from .sync_batchnorm_kernel import SyncBatchnormFunction
from apex.parallel import ReduceOp
class SyncBatchNorm(_BatchNorm):
"""
synchronized batch normalization module extented from ``torch.nn.BatchNormNd``
... | 6,532 | 47.392593 | 228 | py |
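In practice this module is rarely instantiated directly; the documented entry point is apex.parallel.convert_syncbn_model, which walks a module tree and swaps every torch.nn.BatchNorm*d for SyncBatchNorm. A sketch:

```python
import torch
import apex

model = torch.nn.Sequential(
    torch.nn.Conv2d(3, 8, 3),
    torch.nn.BatchNorm2d(8),
    torch.nn.ReLU(),
).cuda()
# Replaces the BatchNorm2d with apex.parallel.SyncBatchNorm.
model = apex.parallel.convert_syncbn_model(model)
```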
TokenMixup | TokenMixup-main/experiments/apex_copy/apex/parallel/sync_batchnorm_kernel.py | import torch
from torch.autograd.function import Function
from apex.parallel import ReduceOp
class SyncBatchnormFunction(Function):
@staticmethod
def forward(ctx, input, weight, bias, running_mean, running_variance, eps, process_group, world_size):
torch.cuda.nvtx.range_push("sync_BN_fw")
# ... | 3,761 | 41.75 | 106 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/apex/RNN/cells.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from .RNNBackend import RNNCell
from torch.nn._functions.thnn import rnnFusedPointwise as fusedBackend
import math
class mLSTMRNNCell(RNNCell):
"""
mLSTMRNNCell
"""
def __init__(self, input_size, hidden_size, bias = False, output_... | 2,550 | 29.011765 | 156 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/apex/RNN/RNNBackend.py | import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
import math
def is_iterable(maybe_iterable):
return isinstance(maybe_iterable, list) or isinstance(maybe_iterable, tuple)
def flatten_list(tens_list):
"""
flatten_list
"""
if not is_iterable(... | 11,578 | 30.636612 | 126 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/apex/RNN/models.py | import torch
from torch.nn._functions.rnn import LSTMCell, RNNReLUCell, RNNTanhCell, GRUCell
from .RNNBackend import bidirectionalRNN, stackedRNN, RNNCell
from .cells import mLSTMRNNCell, mLSTMCell
def toRNNBackend(inputRNN, num_layers, bidirectional=False, dropout = 0):
"""
:class:`toRNNBackend`
"""
... | 2,137 | 37.872727 | 129 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/apex/multi_tensor_apply/multi_tensor_apply.py | import torch
class MultiTensorApply(object):
available = False
warned = False
def __init__(self, chunk_size):
try:
import amp_C
MultiTensorApply.available = True
self.chunk_size = chunk_size
except ImportError as err:
MultiTensorApply.availab... | 991 | 31 | 82 | py |
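A sketch of how the applier is used elsewhere in apex (e.g. the multi-tensor L2-norm tests above): one kernel launch reduces over an entire list of tensors. Assumes apex was built with --cuda_ext so that amp_C imports:

```python
import torch
import amp_C
from apex.multi_tensor_apply import multi_tensor_applier

tensors = [torch.randn(1024, device="cuda") for _ in range(4)]
overflow_buf = torch.zeros(1, dtype=torch.int, device="cuda")

# Returns the global L2 norm and, because the trailing flag is True, one
# norm per input tensor as well.
total_norm, per_tensor = multi_tensor_applier(
    amp_C.multi_tensor_l2norm, overflow_buf, [tensors], True
)
```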
TokenMixup | TokenMixup-main/experiments/apex_copy/apex/optimizers/fused_adagrad.py | import torch
from apex.multi_tensor_apply import multi_tensor_applier
class FusedAdagrad(torch.optim.Optimizer):
"""Implements Adagrad algorithm.
Currently GPU-only. Requires Apex to be installed via
``pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./``.
This... | 5,231 | 41.885246 | 145 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/apex/optimizers/fused_novograd.py | import torch
from apex.multi_tensor_apply import multi_tensor_applier
class FusedNovoGrad(torch.optim.Optimizer):
"""Implements NovoGrad algorithm.
Currently GPU-only. Requires Apex to be installed via
``pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./``.
Th... | 10,652 | 48.548837 | 149 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/apex/optimizers/fused_mixed_precision_lamb.py | import torch
from copy import deepcopy
from itertools import chain
from collections import defaultdict, abc as container_abcs
from apex.multi_tensor_apply import multi_tensor_applier
class FusedMixedPrecisionLamb(torch.optim.Optimizer):
def __init__(self, params, lr=1e-3, step=0, bias_correction=True,
... | 11,231 | 42.70428 | 111 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/apex/optimizers/fused_sgd.py | import torch
from torch.optim.optimizer import Optimizer, required
from apex.multi_tensor_apply import multi_tensor_applier
class FusedSGD(Optimizer):
r"""Implements stochastic gradient descent (optionally with momentum).
Currently GPU-only. Requires Apex to be installed via
``pip install -v --no-cache-... | 10,041 | 43.04386 | 145 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/apex/optimizers/fused_lamb.py | import torch
from apex.multi_tensor_apply import multi_tensor_applier
class FusedLAMB(torch.optim.Optimizer):
"""Implements LAMB algorithm.
Currently GPU-only. Requires Apex to be installed via
``pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./``.
This versi... | 9,910 | 44.884259 | 145 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/apex/optimizers/fused_adam.py | import torch
from apex.multi_tensor_apply import multi_tensor_applier
class FusedAdam(torch.optim.Optimizer):
"""Implements Adam algorithm.
Currently GPU-only. Requires Apex to be installed via
``pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./``.
This versi... | 8,483 | 42.731959 | 151 | py |
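FusedAdam is documented as a drop-in replacement for torch.optim.Adam (AdamW mode by default). A minimal sketch, assuming apex built with --cpp_ext --cuda_ext:

```python
import torch
from apex.optimizers import FusedAdam

model = torch.nn.Linear(32, 32).cuda()
optimizer = FusedAdam(model.parameters(), lr=1e-3, weight_decay=0.01)

loss = model(torch.randn(4, 32, device="cuda")).sum()
loss.backward()
optimizer.step()
optimizer.zero_grad()
```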
TokenMixup | TokenMixup-main/experiments/apex_copy/apex/contrib/sparsity/asp.py | import types
import torch
from .sparse_masklib import create_mask
from .permutation_lib import Permutation
torchvision_imported=True
try:
import torchvision
except ImportError:
print("[ASP][Warning] torchvision cannot be imported.")
torchvision_imported=False
import json
import os
import string
import tim... | 19,277 | 59.432602 | 307 | py |
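ASP's documented recipe is a single call on an already-trained model: prune to 2:4 structured sparsity, then fine-tune with the ordinary training loop (the patched optimizer re-applies the masks on every step). A sketch:

```python
import torch
from apex.optimizers import FusedAdam
from apex.contrib.sparsity import ASP

model = torch.nn.Linear(64, 64).cuda()
optimizer = FusedAdam(model.parameters(), lr=1e-3)

# Masks eligible weights to 2:4 sparsity and patches the optimizer so the
# masks survive subsequent updates; fine-tune as usual afterwards.
ASP.prune_trained_model(model, optimizer)
```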
TokenMixup | TokenMixup-main/experiments/apex_copy/apex/contrib/sparsity/sparse_masklib.py | import sys
import torch
import numpy as np
import collections
from itertools import permutations
""" compute density (helper fn to compute % NNZs in a tensor) """
def fill(x):
return float(x.nonzero().size(0))/torch.numel(x)
""" reshape matrix into m-dimensional vectors: (h,w) -> (hw/m, m) """
def reshape_1d(mat... | 7,433 | 38.542553 | 103 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/apex/contrib/sparsity/permutation_lib.py | import os
import torch
import json
import string
import time
try:
from .permutation_search_kernels import accelerated_search_for_good_permutation, sum_after_2_to_4
print("[ASP][Info] permutation_search_kernels can be imported.")
except ImportError:
print("[ASP][Warning] permutation_search_kernels cannot be ... | 72,979 | 77.642241 | 329 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/apex/contrib/sparsity/test/checkpointing_test_reference.py | from collections import OrderedDict
import torch
from apex.optimizers import FusedAdam
from apex.contrib.sparsity import ASP
#
# Reference run for checkpointing test (part1 + part2)
#
def build_model(args):
od = OrderedDict()
for i in range(args.num_layers):
if i == 0:
od['linear_layer_%d... | 3,182 | 31.814433 | 125 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/apex/contrib/sparsity/test/toy_problem.py | from collections import OrderedDict
import torch
from apex.optimizers import FusedAdam
from apex.contrib.sparsity import ASP
def build_model(args):
od = OrderedDict()
for i in range(args.num_layers):
if i == 0:
od['linear_layer_%d' % (i+1)] = torch.nn.Linear(args.input_features, args.hidde... | 3,217 | 35.568182 | 104 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/apex/contrib/sparsity/test/checkpointing_test_part2.py | from collections import OrderedDict
import torch
from apex.optimizers import FusedAdam
from apex.contrib.sparsity import ASP
def build_model(args):
od = OrderedDict()
for i in range(args.num_layers):
if i == 0:
od['linear_layer_%d' % (i+1)] = torch.nn.Linear(args.input_features, args.hidde... | 3,131 | 38.15 | 151 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/apex/contrib/sparsity/test/checkpointing_test_part1.py | from collections import OrderedDict
import torch
from apex.optimizers import FusedAdam
from apex.contrib.sparsity import ASP
def build_model(args):
od = OrderedDict()
for i in range(args.num_layers):
if i == 0:
od['linear_layer_%d' % (i+1)] = torch.nn.Linear(args.input_features, args.hidde... | 3,358 | 34.357895 | 151 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/apex/contrib/clip_grad/clip_grad.py | import torch
from torch._six import inf
from typing import Union, Iterable
_kernel_import_succeeded = False
try:
import amp_C
from apex.multi_tensor_apply import multi_tensor_applier
_kernel_import_succeeded = True
except:
_kernel_import_succeeded = False
_tensor_or_tensors = Union[torch.Tensor, Itera... | 4,373 | 33.171875 | 87 | py |
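The contrib clip_grad_norm_ mirrors torch.nn.utils.clip_grad_norm_'s signature but computes the norm with a fused multi-tensor kernel when amp_C is importable, falling back otherwise (as the try/except above suggests). A minimal sketch:

```python
import torch
from apex.contrib.clip_grad import clip_grad_norm_

model = torch.nn.Linear(16, 16).cuda()
model(torch.randn(2, 16, device="cuda")).sum().backward()

# Same semantics as torch.nn.utils.clip_grad_norm_: clips gradients in
# place and returns their total norm.
total_norm = clip_grad_norm_(model.parameters(), max_norm=1.0)
```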
TokenMixup | TokenMixup-main/experiments/apex_copy/apex/contrib/transducer/transducer.py | import torch
import transducer_loss_cuda
import transducer_joint_cuda
class TransducerJoint(torch.nn.Module):
"""Transducer joint
Detail of this loss function can be found in: Sequence Transduction with Recurrent Neural
Networks
Arguments:
pack_output (bool, optional): whether to pack the out... | 9,934 | 49.688776 | 102 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/apex/contrib/transducer/_transducer_ref.py | import torch
def transducer_loss_reference(x, label, f_len, y_len, blank_idx, loss_grad):
def log_sum_exp(a, b):
if (a >= b):
return a + torch.log(1 + torch.exp(b-a))
else:
return b + torch.log(1 + torch.exp(a-b))
def forward_alpha(x, label, f_len, y_len, blank_idx):
... | 4,621 | 41.018182 | 104 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/apex/contrib/groupbn/batch_norm.py | import torch
import numpy as np
from torch.nn.modules.batchnorm import _BatchNorm
import bnp
class bn_NHWC_impl(torch.autograd.Function):
@staticmethod
def forward(ctx, x, s, b, rm, riv, mini_m, mini_riv, ret_cta, mom, epsilon, fuse_relu, is_train, bn_group, my_data, pair_data, magic, pair_data2, pair_data3, ... | 11,208 | 48.597345 | 229 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/apex/contrib/groupbn/__init__.py | try:
import torch
import bnp
from .batch_norm import BatchNorm2d_NHWC
del torch
del bnp
del batch_norm
except ImportError as err:
print("apex was installed without --bnp flag, contrib.groupbn is not available")
| 239 | 23 | 84 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/apex/contrib/examples/multihead_attn/func_test_multihead_attn.py | import torch
import torch.nn.functional as F
import argparse
from apex.contrib.multihead_attn import SelfMultiheadAttn
from apex.contrib.multihead_attn import EncdecMultiheadAttn
parser = argparse.ArgumentParser(description='Multihead Attention Standalone Test')
parser.add_argument('--seq-length', default=64, type=in... | 5,740 | 51.669725 | 164 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/apex/contrib/examples/multihead_attn/perf_test_multihead_attn.py | import torch
import torch.nn.functional as F
import argparse
from apex.contrib.multihead_attn import SelfMultiheadAttn
from apex.contrib.multihead_attn import EncdecMultiheadAttn
parser = argparse.ArgumentParser(description='Multihead Attention Standalone Test')
parser.add_argument('--seq-length', default=64, type=in... | 6,163 | 52.137931 | 157 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/apex/contrib/bottleneck/test.py | import torch
from bottleneck import Bottleneck
torch.manual_seed(23337)
# use True to print layerwise sum for all outputs in reference code path
DEBUG = False#True
for stride, o_channel in [(1,32), (1,128), (2,32)]:
print("testing stride ==", stride, ", in_channel == 32 , out_channel ==", o_channel)
a_ = torc... | 3,070 | 41.652778 | 131 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/apex/contrib/bottleneck/bottleneck.py | import functools as func
import torch
import torch.distributed as dist
from torch import nn
from apex import check_cudnn_version_and_warn
import fast_bottleneck
import nccl_p2p_cuda as inc
assert check_cudnn_version_and_warn(__name__, 8400)
def kaiming_uniform_(tensor, a=0, mode='fan_in', nonlinearity='leaky_relu... | 36,558 | 47.745333 | 222 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/apex/contrib/bottleneck/halo_exchangers.py | import torch
import torch.distributed as dist
from torch import nn
import nccl_p2p_cuda as inc
import peer_memory_cuda as pm
# Communication free halo exchanger.
# NB! This halo exchanger does not exchange halos with neighbors as it should, it merely swaps the inputs
# NB! This is only useful for performance testing.
... | 9,599 | 54.813953 | 216 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/apex/contrib/cudnn_gbn/batch_norm.py | import torch
from torch.nn.modules.batchnorm import _BatchNorm
from torch.nn import functional as F
from torch import Tensor
import peer_memory_cuda as pm
import cudnn_gbn_lib
from torch.cuda.amp import custom_fwd, custom_bwd
class _GroupBatchNorm2d(torch.autograd.Function):
@staticmethod
@custom_fwd
def ... | 6,725 | 45.386207 | 144 | py |
TokenMixup | TokenMixup-main/experiments/apex_copy/apex/contrib/test/clip_grad/test_clip_grad.py | import random
import unittest
import torch
SKIP_TEST = None
try:
from apex.contrib.clip_grad import clip_grad_norm_
except ImportError as e:
SKIP_TEST = e
def make_params(
num_params,
sizes=[1,2,3,4,5],
num_dims=[1,2,3],
dtypes=[torch.float32],
devices=['cuda'],
... | 4,983 | 27.809249 | 80 | py |