Schema (column name, dtype, observed range):

| column | dtype | range |
|---|---|---|
| repo | string | lengths 13 to 215 (min length 1, max 99 for this column as listed) |
| file | string | lengths 13 to 215 |
| code | string | lengths 12 to 59.2M |
| file_length | int64 | 12 to 59.2M |
| avg_line_length | float64 | 3.82 to 1.48M |
| max_line_length | int64 | 12 to 2.51M |
| extension_type | string | 1 distinct value |
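Each row below is one record in this schema. As a minimal, hypothetical sketch (the `FileRecord` class and the filter helper are illustrative names of my own, not part of the dataset; the example values are copied from the `test_fmha.py` row below), a record can be modeled and queried like so:

```python
from dataclasses import dataclass

@dataclass
class FileRecord:
    repo: str               # repository name, e.g. "TokenMixup"
    file: str               # path of the file inside the repository snapshot
    code: str               # source text (the previews in the table below are truncated)
    file_length: int        # length of `code` in characters (inferred: its range matches `code`'s)
    avg_line_length: float  # mean line length of `code`, in characters
    max_line_length: int    # longest line of `code`, in characters
    extension_type: str     # file extension; a single class ("py") in this listing

def is_short_lined_test(record: FileRecord) -> bool:
    """Example filter: test files whose longest line fits in 120 columns."""
    return "/test/" in record.file and record.max_line_length <= 120

# Values copied from the test_fmha.py row in the table below; `code` elided here.
fmha_row = FileRecord(
    repo="TokenMixup",
    file="TokenMixup-main/experiments/apex_copy/apex/contrib/test/fmha/test_fmha.py",
    code="...",
    file_length=5412,
    avg_line_length=35.086667,
    max_line_length=90,
    extension_type="py",
)
assert is_short_lined_test(fmha_row)
```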
Data preview (the `code` cells are truncated, as in the original listing):

| repo | file | code | file_length | avg_line_length | max_line_length | extension_type |
|---|---|---|---|---|---|---|
| TokenMixup | TokenMixup-main/experiments/apex_copy/apex/contrib/test/transducer/test_transducer_joint.py | import unittest import torch SKIP_TEST = None try: from apex.contrib.transducer import TransducerJoint from apex.contrib.transducer import _transducer_ref as transducer_ref except ImportError as e: SKIP_TEST = e @unittest.skipIf(SKIP_TEST, f"{SKIP_TEST}") class TransducerJointTest(unittest.TestCase): ... | 7,232 | 42.053571 | 103 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/apex/contrib/test/transducer/test_transducer_loss.py | import unittest import torch SKIP_TEST = None try: from apex.contrib.transducer import TransducerLoss from apex.contrib.transducer import _transducer_ref as transducer_ref except ImportError as e: SKIP_TEST = e @unittest.skipIf(SKIP_TEST, f"{SKIP_TEST}") class TransducerLossTest(unittest.TestCase): ... | 6,972 | 48.807143 | 100 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/apex/contrib/test/bottleneck/test_bottleneck_module.py | import unittest import torch from torch.testing._internal import common_utils from apex.transformer.testing.distributed_test_base import NcclDistributedTestBase SKIP_TEST = None try: from apex.contrib.bottleneck import Bottleneck, SpatialBottleneck from apex.contrib.bottleneck import HaloExchangerPeer fro... | 11,597 | 34.46789 | 117 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/apex/contrib/test/fused_dense/test_fused_dense.py | import unittest import os import torch from torch.testing._internal import common_utils from torch.testing._internal.common_device_type import instantiate_device_type_tests SKIP_TEST = None try: from apex import fused_dense except ImportError as e: SKIP_TEST = e @unittest.skipIf(SKIP_TEST, f"{SKIP_TEST}") c... | 2,057 | 31.666667 | 95 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/apex/contrib/test/cudnn_gbn/test_cudnn_gbn_with_two_gpus.py | import copy import typing import unittest import torch import torch.nn as nn from torch.testing._internal import common_utils SKIP_TEST = None from apex.transformer.testing.distributed_test_base import NcclDistributedTestBase try: from apex.contrib.cudnn_gbn import GroupBatchNorm2d as GBN except ImportError as e:... | 4,902 | 32.128378 | 114 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/apex/contrib/test/xentropy/test_label_smoothing.py | import unittest import random import time import numpy as np import torch SKIP_TEST = None try: from apex.contrib import xentropy as label_smoothing except ImportError as e: SKIP_TEST = e def label_smoothing_raw(x, target, padding_idx, smoothing): logprobs = torch.nn.functional.log_softmax(x, dim=-1, d... | 4,852 | 34.423358 | 85 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/apex/contrib/test/conv_bias_relu/test_conv_bias_relu.py | import copy import math import random import unittest import torch import torch.nn.functional as F HAS_CONV_BIAS_RELU = None try: from apex.contrib.conv_bias_relu import ConvBiasReLU, ConvBias, ConvBiasMaskReLU except ImportError as e: HAS_CONV_BIAS_RELU = False else: HAS_CONV_BIAS_RELU = True @unittest... | 5,275 | 48.308411 | 147 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/apex/contrib/test/multihead_attn/test_encdec_multihead_attn_norm_add.py | import unittest import torch SKIP_TEST = None try: from apex.contrib.multihead_attn import EncdecMultiheadAttn except ImportError as e: SKIP_TEST = e @unittest.skipIf(SKIP_TEST, f"{SKIP_TEST}") class EncdecMultiheadAttnNormAddTest(unittest.TestCase): def setUp(self, seed=1234): torch.manual_seed... | 4,036 | 45.94186 | 110 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/apex/contrib/test/multihead_attn/test_fast_self_multihead_attn_bias.py | import unittest import torch SKIP_TEST = None try: from apex.contrib.multihead_attn import SelfMultiheadAttn except ImportError as e: SKIP_TEST = e @unittest.skipIf(SKIP_TEST, f"{SKIP_TEST}") class SelfMultiheadAttnTest(unittest.TestCase): def setUp(self, seed=1234): torch.manual_seed(seed) ... | 3,730 | 43.416667 | 108 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/apex/contrib/test/multihead_attn/test_self_multihead_attn.py | import unittest import torch SKIP_TEST = None try: from apex.contrib.multihead_attn import SelfMultiheadAttn except ImportError as e: SKIP_TEST = e @unittest.skipIf(SKIP_TEST, f"{SKIP_TEST}") class SelfMultiheadAttnTest(unittest.TestCase): def setUp(self, seed=1234): torch.manual_seed(seed) ... | 6,689 | 47.832117 | 147 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/apex/contrib/test/multihead_attn/test_encdec_multihead_attn.py | import unittest import torch SKIP_TEST = None try: from apex.contrib.multihead_attn import EncdecMultiheadAttn except ImportError as e: SKIP_TEST = e @unittest.skipIf(SKIP_TEST, f"{SKIP_TEST}") class EncdecMultiheadAttnTest(unittest.TestCase): def setUp(self, seed=1234): torch.manual_seed(seed) ... | 7,468 | 51.598592 | 152 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/apex/contrib/test/multihead_attn/test_self_multihead_attn_norm_add.py | import unittest import torch SKIP_TEST = None try: from apex.contrib.multihead_attn import SelfMultiheadAttn except ImportError as e: SKIP_TEST = e @unittest.skipIf(SKIP_TEST, f"{SKIP_TEST}") class SelfMultiheadAttnNormAddTest(unittest.TestCase): def setUp(self, seed=1234): torch.manual_seed(see... | 3,430 | 41.8875 | 108 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/apex/contrib/test/multihead_attn/test_mha_fused_softmax.py | import unittest import torch import torch.nn.functional as F SKIP_TEST = None try: from apex.contrib.multihead_attn import fast_mask_softmax_dropout_func except ImportError as e: SKIP_TEST = e @unittest.skipIf(SKIP_TEST, f"{SKIP_TEST}") class FusedSoftmaxTest(unittest.TestCase): def setUp(self, seed=123... | 1,891 | 36.098039 | 108 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/apex/contrib/test/focal_loss/test_focal_loss.py | import unittest import torch import torch.nn.functional as F reference_available = True try: from torchvision.ops.focal_loss import sigmoid_focal_loss except ImportError: reference_available = False SKIP_TEST = None try: from apex.contrib.focal_loss import focal_loss except ImportError as e: SKIP_TES... | 2,253 | 29.053333 | 135 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/apex/contrib/test/layer_norm/test_fast_layer_norm.py | import unittest import torch SKIP_TEST = None try: from apex.contrib.layer_norm.layer_norm import FastLayerNorm import fast_layer_norm as fln except ImportError as e: SKIP_TEST = e class GPUTimer: def __init__(self, stream): self.start_ = torch.cuda.Event(enable_timing=True) self.sto... | 8,127 | 28.028571 | 96 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/apex/contrib/test/optimizers/test_dist_adam.py | from contextlib import contextmanager import io import unittest import torch from torch.testing._internal import common_utils SKIP_TEST = None try: from apex.contrib.optimizers.distributed_fused_adam import DistributedFusedAdam except ImportError as e: SKIP_TEST = e from apex.transformer.testing.distributed_t... | 15,196 | 32.771111 | 87 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/apex/contrib/test/peer_memory/test_peer_halo_exchange_module.py | import unittest import torch from torch.testing._internal import common_utils SKIP_TEST = None from apex.transformer.testing.distributed_test_base import NcclDistributedTestBase try: from apex.contrib.peer_memory import PeerMemoryPool, PeerHaloExchanger1d except ImportError as e: SKIP_TEST = e # How to run: ... | 10,139 | 29.820669 | 111 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/apex/contrib/test/index_mul_2d/test_index_mul_2d.py | import random import unittest import torch HAS_INDEX_MUL_2D_RELU = None try: from apex.contrib.index_mul_2d import index_mul_2d except ImportError as e: HAS_INDEX_MUL_2D_RELU = False else: HAS_INDEX_MUL_2D_RELU = True @unittest.skipIf(not HAS_INDEX_MUL_2D_RELU, "`apex.contrib.index_mul_2d` is not found.... | 4,377 | 40.695238 | 126 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/apex/contrib/test/fmha/test_fmha.py | ############################################################################### # Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistribution... | 5,412 | 35.086667 | 90 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/apex/contrib/xentropy/softmax_xentropy.py | import torch import xentropy_cuda class SoftmaxCrossEntropyLoss(torch.autograd.Function): @staticmethod def forward(ctx, logits, labels, smoothing=0.0, padding_idx=0, half_to_float=False): losses, max_log_sum_exp = xentropy_cuda.forward( logits, labels, smoothing, half_to_float) l... | 1,025 | 32.096774 | 88 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/apex/contrib/conv_bias_relu/conv_bias_relu.py | import pdb import torch from torch.autograd import gradcheck from apex import check_cudnn_version_and_warn import fused_conv_bias_relu check_cudnn_version_and_warn(__name__, 8400) class ConvBiasReLU_(torch.autograd.Function): @staticmethod @torch.cuda.amp.custom_fwd(cast_inputs=torch.half) def forward(... | 2,493 | 29.414634 | 93 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/apex/contrib/multihead_attn/fast_encdec_multihead_attn_func.py | import torch import fast_multihead_attn class FastEncdecAttnFunc(torch.autograd.Function): @staticmethod def forward( ctx, use_time_mask, is_training, heads, inputs_q, inputs_kv, input_weights_q, input_weights_kv, output_weights, ... | 2,974 | 23.385246 | 63 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/apex/contrib/multihead_attn/fast_self_multihead_attn_norm_add_func.py | import torch import fast_multihead_attn class FastSelfAttnNormAddFunc(torch.autograd.Function): @staticmethod def forward( ctx, use_time_mask, is_training, heads, inputs, lyr_nrm_gamma_weights, lyr_nrm_beta_weights, input_weights, output... | 3,492 | 24.683824 | 71 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/apex/contrib/multihead_attn/fast_self_multihead_attn_func.py | import torch import fast_multihead_attn class FastSelfAttnFunc(torch.autograd.Function): @staticmethod def forward( ctx, use_time_mask, is_training, heads, inputs, input_weights, output_weights, input_biases, output_biases, pad_m... | 7,679 | 30.47541 | 106 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/apex/contrib/multihead_attn/fast_encdec_multihead_attn_norm_add_func.py | # Copyright (c) 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the LICENSE file in # the root directory of this source tree. An additional grant of patent rights # can be found in the PATENTS file in the same directory. import torch import fast_multihea... | 4,258 | 25.61875 | 78 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/apex/contrib/multihead_attn/self_multihead_attn.py | import math import torch from torch import nn from torch.nn import Parameter import torch.nn.functional as F from .self_multihead_attn_func import self_attn_func from .fast_self_multihead_attn_func import fast_self_attn_func from .fast_self_multihead_attn_norm_add_func import fast_self_attn_norm_add_func from apex.no... | 10,097 | 38.6 | 118 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/apex/contrib/multihead_attn/encdec_multihead_attn_func.py | import torch import torch.nn.functional as F class EncdecAttnFunc(torch.autograd.Function): @staticmethod def forward( ctx, use_time_mask, is_training, heads, scale, inputs_q, inputs_kv, input_weights_q, input_weights_kv, output_w... | 16,844 | 46.184874 | 131 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/apex/contrib/multihead_attn/mask_softmax_dropout_func.py | import torch import fast_multihead_attn class MaskSoftmaxDropout(torch.autograd.Function): @staticmethod def forward(ctx, is_training, heads, inputs, pad_mask, mask_additive, dropout_prob): heads_t = torch.tensor([heads]) dropout_prob_t = torch.tensor([dropout_prob]) null_tensor = tor... | 2,456 | 36.8 | 119 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/apex/contrib/multihead_attn/encdec_multihead_attn.py | import math import torch from torch import nn from torch.nn import Parameter import torch.nn.functional as F from .encdec_multihead_attn_func import encdec_attn_func from .fast_encdec_multihead_attn_func import fast_encdec_attn_func from .fast_encdec_multihead_attn_norm_add_func import fast_encdec_attn_norm_add_func ... | 7,546 | 38.931217 | 118 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/apex/contrib/multihead_attn/self_multihead_attn_func.py | import torch import torch.nn.functional as F class SelfAttnFunc(torch.autograd.Function): @staticmethod def forward( ctx, use_time_mask, is_training, heads, scale, inputs, input_weights, output_weights, input_biases, output_biases... | 14,144 | 44.776699 | 133 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/apex/contrib/focal_loss/focal_loss.py | import torch import focal_loss_cuda class FocalLoss(torch.autograd.Function): @staticmethod def forward( ctx, cls_output, cls_targets_at_level, num_positives_sum, num_real_classes, alpha, gamma, label_smoothing=0.0, ): loss, partial_... | 1,499 | 23.590164 | 89 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/apex/contrib/focal_loss/__init__.py | try: import torch import focal_loss_cuda from .focal_loss import focal_loss del torch del focal_loss_cuda del focal_loss except ImportError as err: print("apex was installed without --focal_loss flag, apex.contrib.focal_loss is not available") | 272 | 26.3 | 99 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/apex/contrib/layer_norm/layer_norm.py | import torch from torch.nn import init from apex._autocast_utils import _cast_if_autocast_enabled import fast_layer_norm class FastLayerNormFN(torch.autograd.Function): @staticmethod def forward(ctx, x, gamma, beta, epsilon): x = x.contiguous() gamma = gamma.contiguous() beta = beta.c... | 1,737 | 31.185185 | 91 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/apex/contrib/optimizers/distributed_fused_adam.py | import collections import contextlib import enum import inspect import io import itertools import threading import torch from torch.distributed.distributed_c10d import _get_default_group, _get_global_rank from apex.multi_tensor_apply import multi_tensor_applier import amp_C import distributed_adam_cuda _FOUND_DEPRECA... | 59,432 | 40.416725 | 96 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/apex/contrib/optimizers/fp16_optimizer.py | import torch from apex.multi_tensor_apply import multi_tensor_applier class FP16_Optimizer(object): """ :class:`FP16_Optimizer` A cutdown version of apex.fp16_utils.FP16_Optimizer. Designed only to wrap apex.contrib.optimizers.FusedAdam, FusedSGD. Refer to apex.fp16_utils documents for more information... | 10,448 | 41.82377 | 126 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/apex/contrib/optimizers/fused_sgd.py | import types import torch from torch.optim.optimizer import Optimizer, required from apex.multi_tensor_apply import multi_tensor_applier class FusedSGD(Optimizer): r"""Implements stochastic gradient descent (optionally with momentum). This version of fused SGD implements 2 fusions. * Fusion of the SGD ... | 9,468 | 43.665094 | 145 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/apex/contrib/optimizers/fused_lamb.py | import torch import importlib import math from apex.multi_tensor_apply import multi_tensor_applier class FusedLAMB(torch.optim.Optimizer): """Implements LAMB algorithm. Currently GPU-only. Requires Apex to be installed via ``pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cu... | 9,408 | 44.019139 | 145 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/apex/contrib/optimizers/fused_adam.py | import types import torch import importlib from apex.multi_tensor_apply import multi_tensor_applier class FusedAdam(torch.optim.Optimizer): """Implements Adam algorithm. Currently GPU-only. Requires Apex to be installed via ``python setup.py install --cuda_ext --cpp_ext``. It has been proposed in `Adam:... | 9,284 | 43.855072 | 145 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/apex/contrib/optimizers/distributed_fused_lamb.py | import os import math import torch import importlib import amp_C from apex.multi_tensor_apply import multi_tensor_applier import torch.distributed.distributed_c10d as c10d _make_nccl_premul_sum = getattr(torch.distributed, "_make_nccl_premul_sum", None) # Ref: https://github.com/pytorch/pytorch/pull/81272 if _make_nc... | 53,733 | 53.441743 | 262 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/apex/contrib/peer_memory/peer_halo_exchanger_1d.py | import torch from apex.contrib.peer_memory import PeerMemoryPool import peer_memory_cuda as pm class PeerHaloExchanger1d: def __init__(self, ranks, rank_in_group, peer_pool, half_halo): self.peer_group_size = len(ranks) self.ranks = ranks self.peer_rank = rank_in_group self.low_neig... | 3,995 | 59.545455 | 131 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/apex/contrib/peer_memory/peer_memory.py | import torch import numpy as np import peer_memory_cuda as pm class PeerMemoryPool(object): def __init__(self, static_size, dynamic_size, peer_ranks=None): rank = torch.distributed.get_rank() world_size = torch.distributed.get_world_size() ngpus = min(torch.cuda.device_count(), world_size)... | 4,748 | 52.965909 | 165 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/apex/contrib/index_mul_2d/index_mul_2d.py | import torch import fused_index_mul_2d class IndexMul2d_(torch.autograd.Function): ''' Currently only support index in dimension 0 with a 2-dimension tensor. The shape of indexed in1 must be same with in2. Now this kernel does not support broadcast. The datatype must be float32 or float16. ''' ... | 4,594 | 30.689655 | 115 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/apex/contrib/fmha/fmha.py | ############################################################################### # Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributio... | 3,577 | 45.467532 | 122 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/apex/transformer/utils.py | """Utility functions used by both `pipeline_parallel` and `tensor_parallel`""" import torch from apex.transformer import parallel_state def ensure_divisibility(numerator, denominator): """Ensure that numerator is divisible by the denominator.""" assert numerator % denominator == 0, "{} is not divisible by {}... | 1,576 | 31.183673 | 82 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/apex/transformer/parallel_state.py | # coding=utf-8 # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless re... | 28,418 | 40.609078 | 184 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/apex/transformer/testing/arguments.py | # coding=utf-8 # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless re... | 50,546 | 51.003086 | 111 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/apex/transformer/testing/standalone_gpt.py | # Copyright (c) 2021-22, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by ap... | 3,935 | 34.142857 | 118 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/apex/transformer/testing/commons.py | # coding=utf-8 # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless re... | 9,603 | 31.228188 | 108 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/apex/transformer/testing/global_vars.py | # coding=utf-8 # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless re... | 8,862 | 31.704797 | 100 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/apex/transformer/testing/standalone_transformer_lm.py | # coding=utf-8 # Copyright (c) 2021-22, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless... | 60,815 | 37.613333 | 136 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/apex/transformer/testing/standalone_bert.py | import contextlib import torch from apex.transformer import tensor_parallel from apex.transformer.enums import AttnMaskType from apex.transformer.enums import ModelType from apex.transformer.layers import FusedLayerNorm as LayerNorm from apex.transformer.testing.global_vars import get_args from apex.transformer.testi... | 9,945 | 37.851563 | 104 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/apex/transformer/testing/distributed_test_base.py | import os import sys import unittest from packaging.version import Version, parse import torch from torch import distributed as dist from torch.utils import collect_env from torch.testing._internal import common_utils from torch.testing._internal import common_distributed HAS_TORCH_UCC = None try: import torch_uc... | 4,029 | 29.763359 | 127 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/apex/transformer/amp/grad_scaler.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by appli... | 5,335 | 43.466667 | 118 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/apex/transformer/_data/_batchsampler.py | """BatchSampler implementations for POC of dynamic batch size or rampup_batch_size support. Implementations are based on https://github.com/NVIDIA/Megatron-LM/blob/bcd605f8570ebeeb0436c115ebbfafc3c5a40ae5/megatron/data/data_samplers.py. """ # NOQA import abc import torch __all__ = [ "MegatronPretrainingSampler... | 7,203 | 38.801105 | 145 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/apex/transformer/layers/layer_norm.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # NOTE(mkozuki): This file defines two LayerNorm that are compatible with Megatron-LM. # while avoiding introducing the breaking change of `"sequence_parallel_enabled"` attribute into apex.normalization.FusedLayerNorm # and apex.contrib.layer_norm.FastLaye... | 3,456 | 33.57 | 141 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/apex/transformer/pipeline_parallel/_timers.py | import time import torch class _Timer: """Timer.""" def __init__(self, name): self.name_ = name self.elapsed_ = 0.0 self.started_ = False self.start_time = time.time() def start(self): """Start the timer.""" assert not self.started_, "timer has already be... | 2,538 | 29.22619 | 88 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/apex/transformer/pipeline_parallel/utils.py | # coding=utf-8 # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless re... | 12,563 | 34.094972 | 102 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/apex/transformer/pipeline_parallel/p2p_communication.py | # coding=utf-8 # Copyright (c) 2021-22, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless... | 23,172 | 39.022453 | 199 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/apex/transformer/pipeline_parallel/schedules/fwd_bwd_pipelining_with_interleaving.py | from typing import List, Union, Optional, Sequence import warnings import torch from apex.transformer import parallel_state from apex.transformer.pipeline_parallel import p2p_communication from apex.transformer.pipeline_parallel.schedules.common import Batch from apex.transformer.pipeline_parallel.schedules.common im... | 18,475 | 43.413462 | 119 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/apex/transformer/pipeline_parallel/schedules/fwd_bwd_no_pipelining.py | import contextlib from typing import List, Union, Optional import torch from apex.transformer.pipeline_parallel.utils import listify_model from apex.transformer.pipeline_parallel.utils import get_num_microbatches from apex.transformer.pipeline_parallel.utils import get_kth_microbatch from apex.transformer.pipeline_pa... | 4,663 | 36.312 | 103 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/apex/transformer/pipeline_parallel/schedules/common.py | from typing import Any, Callable, Dict, List, Tuple, Union, Optional, Sequence import torch from torch.autograd.variable import Variable from apex.normalization.fused_layer_norm import FusedLayerNorm from apex.transformer import parallel_state from apex.transformer.enums import ModelType from apex.transformer.pipelin... | 15,542 | 37.954887 | 136 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/apex/transformer/pipeline_parallel/schedules/fwd_bwd_pipelining_without_interleaving.py | import contextlib from typing import Union, List, Optional, Sequence import warnings import torch from apex.transformer import parallel_state from apex.transformer.enums import ModelType from apex.transformer.pipeline_parallel import p2p_communication from apex.transformer.pipeline_parallel.p2p_communication import F... | 20,328 | 38.245174 | 152 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/apex/transformer/tensor_parallel/memory.py | # coding=utf-8 # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless r... | 5,203 | 33.236842 | 85 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/apex/transformer/tensor_parallel/mappings.py | # coding=utf-8 # Copyright (c) 2021-22, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless... | 10,372 | 33.009836 | 111 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/apex/transformer/tensor_parallel/cross_entropy.py | # coding=utf-8 # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless re... | 6,446 | 46.755556 | 115 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/apex/transformer/tensor_parallel/utils.py | # coding=utf-8 # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless re... | 2,396 | 35.876923 | 112 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/apex/transformer/tensor_parallel/data.py | # coding=utf-8 # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless re... | 4,047 | 31.910569 | 85 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/apex/transformer/tensor_parallel/layers.py | # coding=utf-8 # Copyright (c) 2021-22, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless... | 28,979 | 36.106274 | 141 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/apex/transformer/tensor_parallel/random.py | # coding=utf-8 # Copyright (c) 2021-22, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless... | 11,852 | 36.990385 | 106 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/apex/transformer/functional/fused_softmax.py | # coding=utf-8 # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless re... | 9,931 | 36.479245 | 141 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/apex/mlp/mlp.py | from copy import copy import math import torch from torch import nn from apex._autocast_utils import _cast_if_autocast_enabled import mlp_cuda class MlpFunction(torch.autograd.Function): @staticmethod def forward(ctx, bias, activation, *args): output = mlp_cuda.forward(bias, activation, args) ... | 2,757 | 30.701149 | 115 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/build/lib/apex/_autocast_utils.py | from typing import Optional, Sequence import torch __all__ = ["_cast_if_autocast_enabled"] def _get_autocast_dtypes() -> Sequence[torch.dtype]: if torch.cuda.is_bf16_supported(): return [torch.half, torch.bfloat16] return [torch.half] def _get_current_dtype(dtype: Optional[torch.dtype] = None) ->... | 664 | 23.62963 | 87 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/build/lib/apex/__init__.py | import logging import warnings # May help avoid undefined symbol errors https://pytorch.org/cppdocs/notes/faq.html#undefined-symbol-errors-from-pytorch-aten import torch if torch.distributed.is_available(): from . import parallel from . import amp from . import fp16_utils # For optimizers and normalization the... | 2,034 | 38.134615 | 170 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/build/lib/apex/amp/scaler.py | import torch from ..multi_tensor_apply import multi_tensor_applier from ._amp_state import _amp_state, master_params, maybe_print from itertools import product def scale_check_overflow_python(model_grad, master_grad, scale, check_overflow=False): # Exception handling for 18.04 compatibility if check_overflow: ... | 10,494 | 47.142202 | 110 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/build/lib/apex/amp/_process_optimizer.py | import types from ..fp16_utils import master_params_to_model_params from ..multi_tensor_apply import multi_tensor_applier from ._amp_state import maybe_print import torch from ..optimizers import FusedSGD class AmpOptimizerState(object): def __init__(self): pass def _master_params_to_model_params(self):... | 20,747 | 41.342857 | 115 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/build/lib/apex/amp/_amp_state.py | # This is a "header object" that allows different amp modules to communicate. # I'm a C++ guy, not a python guy. I decided this approach because it seemed most C++-like. # But apparently it's ok: # http://effbot.org/pyfaq/how-do-i-share-global-variables-across-modules.htm import os import torch TORCH_MAJOR = int(torc... | 2,008 | 27.7 | 92 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/build/lib/apex/amp/utils.py | from . import compat import functools import itertools import torch def is_cuda_enabled(): return torch.version.cuda is not None def get_cuda_version(): return tuple(int(x) for x in torch.version.cuda.split('.')) def is_fp_tensor(x): if is_nested(x): # Fast-fail version of all(is_fp_tensor) ... | 7,222 | 33.232227 | 86 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/build/lib/apex/amp/opt.py | import contextlib import warnings from .scaler import LossScaler, master_params from ._amp_state import maybe_print import numpy as np class OptimWrapper(object): def __init__(self, optimizer, amp_handle, num_loss): self._optimizer = optimizer self._amp_handle = amp_handle self._num_loss ... | 3,446 | 32.144231 | 80 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/build/lib/apex/amp/amp.py | from . import compat, rnn_compat, utils, wrap from .handle import AmpHandle, NoOpHandle from .lists import functional_overrides, torch_overrides, tensor_overrides from ._amp_state import _amp_state from .frontend import * import functools import itertools import torch _DECORATOR_HANDLE = None _USER_CAST_REGISTRY = ... | 7,266 | 39.825843 | 101 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/build/lib/apex/amp/compat.py | import torch # True for post-0.4, when Variables/Tensors merged. def variable_is_tensor(): v = torch.autograd.Variable() return isinstance(v, torch.Tensor) def tensor_is_variable(): x = torch.Tensor() return type(x) == torch.autograd.Variable # False for post-0.4 def tensor_is_float_tensor(): x =... | 1,393 | 28.659574 | 77 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/build/lib/apex/amp/wrap.py | from . import compat from . import utils from ._amp_state import _amp_state from . import rnn_compat import functools import torch def make_cast_wrapper(orig_fn, cast_fn, handle, try_caching=False): @functools.wraps(orig_fn) def wrapper(*args, **kwargs): if not handle.is_active(... | 11,242 | 39.588448 | 89 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/build/lib/apex/amp/_initialize.py | import torch from torch._six import string_classes import functools import numpy as np import sys from types import MethodType import warnings from ._amp_state import _amp_state, warn_or_err, container_abcs from .handle import disable_casts from .scaler import LossScaler from ._process_optimizer import _process_optimiz... | 11,606 | 42.965909 | 111 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/build/lib/apex/amp/frontend.py | import torch from ._initialize import _initialize from ._amp_state import _amp_state, warn_or_err, maybe_print from collections import OrderedDict class Properties(object): """ This class has two purposes: to establish a set of default properties, and to route setting of these attributes through __setattr... | 21,267 | 47.009029 | 115 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/build/lib/apex/amp/rnn_compat.py | from . import utils, wrap import torch _VF = torch._C._VariableFunctions RNN_NAMES = ['rnn_relu', 'rnn_tanh', 'gru', 'lstm'] def _gen_VF_wrapper(name): def wrapper(*args, **kwargs): return getattr(_VF, name)(*args, **kwargs) return wrapper # Some python magic to generate an object that has the rnn ce... | 1,995 | 35.962963 | 79 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/build/lib/apex/amp/handle.py | import contextlib import warnings import sys import torch from . import utils from .opt import OptimWrapper from .scaler import LossScaler from ._amp_state import _amp_state, master_params, maybe_print if torch.distributed.is_available(): from ..parallel.LARC import LARC # There's no reason to expose the notion... | 12,066 | 41.79078 | 118 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/build/lib/apex/amp/lists/functional_overrides.py | # TODO: think about the following two. They do weird things. # - torch.nn.utils.clip_grad (but it should always be fp32 anyway) # - torch.nn.utils.weight_norm # Notes: # F.instance_norm uses batch_norm internally. Which correctly handles # fp16 in/out with fp32 weights. So we shouldn't do anything for # either of... | 2,248 | 26.765432 | 96 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/build/lib/apex/amp/lists/tensor_overrides.py | from .. import compat from . import torch_overrides import importlib import torch # if compat.variable_is_tensor() and not compat.tensor_is_variable(): MODULE = torch.Tensor # else: # MODULE = torch.autograd.Variable FP16_FUNCS = compat.filter_attrs(MODULE, [ '__matmul__', ]) FP32_FUNCS = compat.filter_at... | 1,402 | 20.921875 | 72 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/build/lib/apex/amp/lists/torch_overrides.py | import torch from .. import utils MODULE = torch FP16_FUNCS = [ # Low level functions wrapped by torch.nn layers. # The wrapper layers contain the weights which are then passed in as a parameter # to these functions. 'conv1d', 'conv2d', 'conv3d', 'conv_transpose1d', 'conv_transpose2d'... | 2,082 | 16.956897 | 84 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/build/lib/apex/fused_dense/fused_dense.py | import torch from torch import nn import fused_dense_cuda from apex._autocast_utils import _cast_if_autocast_enabled #implements fused GEMM+bias in forward pass using mlp_cuda from apex class FusedDenseFunc(torch.autograd.Function): @staticmethod def forward(ctx, input, weight, bias): ctx.save_for_back... | 4,078 | 41.489583 | 173 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/build/lib/apex/normalization/fused_layer_norm.py | import importlib import numbers import torch from torch.nn.parameter import Parameter from torch.nn import init from torch.nn import functional as F from apex._autocast_utils import _cast_if_autocast_enabled global fused_layer_norm_cuda fused_layer_norm_cuda = None # Reference implementation from Huggingface def m... | 18,213 | 40.584475 | 114 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/build/lib/apex/fp16_utils/fp16_optimizer.py | import torch from torch import nn from torch.autograd import Variable from torch.nn.parameter import Parameter from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors from ..amp._amp_state import _amp_state, maybe_print from ..amp.scaler import LossScaler from ..multi_tensor_apply import multi_tensor... | 27,769 | 49.036036 | 425 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/build/lib/apex/fp16_utils/fp16util.py | import torch import torch.nn as nn from torch.autograd import Variable from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors class tofp16(nn.Module): """ Utility module that implements:: def forward(self, input): return input.half() """ def __init__(self): ... | 7,141 | 36.989362 | 337 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/build/lib/apex/fp16_utils/loss_scaler.py | import torch # item() is a recent addition, so this helps with backward compatibility. def to_python_float(t): if hasattr(t, 'item'): return t.item() else: return t[0] class LossScaler: """ Class that manages a static loss scale. This class is intended to interact with :class:`FP1... | 7,568 | 39.475936 | 326 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/build/lib/apex/parallel/multiproc.py | import torch import sys import subprocess def docstring_hack(): """ Multiproc file which will launch a set of processes locally for multi-gpu usage: python -m apex.parallel.multiproc main.py ... """ pass argslist = list(sys.argv)[1:] world_size = torch.cuda.device_count() if '--world-size' in arg... | 884 | 23.583333 | 77 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/build/lib/apex/parallel/optimized_sync_batchnorm.py | import torch from torch.nn.modules.batchnorm import _BatchNorm from torch.nn import functional as F import syncbn from .optimized_sync_batchnorm_kernel import SyncBatchnormFunction class SyncBatchNorm(_BatchNorm): """ synchronized batch normalization module extented from `torch.nn.BatchNormNd` with the a... | 4,364 | 49.755814 | 252 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/build/lib/apex/parallel/optimized_sync_batchnorm_kernel.py | import torch from torch.autograd.function import Function import syncbn from apex.parallel import ReduceOp class SyncBatchnormFunction(Function): @staticmethod def forward(ctx, input, z, weight, bias, running_mean, running_variance, eps, track_running_stats = True, momentum = 1.0, process_group = None, chann... | 5,467 | 44.566667 | 189 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/build/lib/apex/parallel/LARC.py | import torch from torch import nn from torch.nn.parameter import Parameter class LARC(object): """ :class:`LARC` is a pytorch implementation of both the scaling and clipping variants of LARC, in which the ratio between gradient and parameter magnitudes is used to calculate an adaptive local learning r... | 4,018 | 36.212963 | 225 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/build/lib/apex/parallel/distributed.py | import torch import torch.distributed as dist from torch.nn.modules import Module from torch.autograd import Variable from collections import OrderedDict from itertools import chain import copy import importlib from ..multi_tensor_apply import multi_tensor_applier imported_flatten_impl = False def import_flatten_impl... | 30,651 | 46.89375 | 496 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/build/lib/apex/parallel/__init__.py | import torch if hasattr(torch.distributed, 'ReduceOp'): ReduceOp = torch.distributed.ReduceOp elif hasattr(torch.distributed, 'reduce_op'): ReduceOp = torch.distributed.reduce_op else: ReduceOp = torch.distributed.deprecated.reduce_op from .distributed import DistributedDataParallel, Reducer # This is tri... | 3,667 | 37.208333 | 162 | py |
| TokenMixup | TokenMixup-main/experiments/apex_copy/build/lib/apex/parallel/sync_batchnorm.py | import torch from torch.nn.modules.batchnorm import _BatchNorm from torch.nn import functional as F from .sync_batchnorm_kernel import SyncBatchnormFunction from apex.parallel import ReduceOp class SyncBatchNorm(_BatchNorm): """ synchronized batch normalization module extented from ``torch.nn.BatchNormNd`` ... | 6,532 | 47.392593 | 228 | py |