| repo (string, len 1-99) | file (string, len 13-215) | code (string, len 12-59.2M) | file_length (int64, 12-59.2M) | avg_line_length (float64, 3.82-1.48M) | max_line_length (int64, 12-2.51M) | extension_type (string, 1 class) |
|---|---|---|---|---|---|---|
Emotional-Support-Conversation | Emotional-Support-Conversation-main/codes_zcj/apex/pyprof/prof/pooling.py | from collections import OrderedDict
from .utility import Utility
# Work in progress.
#poolFuncs = ["max_pool2d_with_indices_forward", "max_pool2d_with_indices"]
class MaxPool2d(object):
def parse(marker):
def convert2Tuple(arg):
assert (arg['type'] in ["int", "tuple"])
if arg['type'] == "int":
return ... | 1,459 | 23.333333 | 75 | py |
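The row above cuts `convert2Tuple` off mid-return. A minimal sketch of the int-to-tuple normalization it appears to implement; the `'value'` key is an assumption, since the source is truncated here:

```python
def convert2Tuple(arg):
    # Normalize a parsed pooling argument (kernel_size, stride, padding)
    # to a 2-tuple, since it may arrive as either an int or a tuple.
    assert arg['type'] in ["int", "tuple"]
    if arg['type'] == "int":
        v = arg['value']  # assumed field name; truncated in the row above
        return (v, v)
    return arg['value']
```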
Emotional-Support-Conversation | Emotional-Support-Conversation-main/codes_zcj/apex/pyprof/prof/reduction.py | from collections import OrderedDict
from .utility import Utility
from .base import OperatorLayerBase
class Mean(OperatorLayerBase):
def __init__(self, d):
marker = eval(d.argMarker[0])
mod = marker['mod']
op = marker['op']
args = marker['args']
self.marker = marker
self.mod_ = mod
self.op_ = op
self... | 2,788 | 17.470199 | 76 | py |
Emotional-Support-Conversation | Emotional-Support-Conversation-main/codes_zcj/apex/pyprof/prof/base.py | from abc import ABC, abstractmethod
class OperatorLayerBase(ABC):
"""
Base class for all layers and operators.
Every derived class should have the following functions.
"""
@abstractmethod
def tc(self):
"""
Tensor core usage by the kernel.
Return "1" (yes), "0" (no, but possible), "-" (not applicable)
""... | 742 | 14.479167 | 64 | py |
Emotional-Support-Conversation | Emotional-Support-Conversation-main/codes_zcj/apex/pyprof/prof/randomSample.py | from collections import OrderedDict
from .utility import Utility
from .base import OperatorLayerBase
class RandPerm(OperatorLayerBase):
def __init__(self, d):
marker = eval(d.argMarker[0])
mod = marker['mod']
op = marker['op']
args = marker['args']
self.marker = marker
self.mod_ = mod
self.op_ = op
... | 817 | 17.590909 | 64 | py |
Emotional-Support-Conversation | Emotional-Support-Conversation-main/codes_zcj/apex/pyprof/prof/softmax.py | from collections import OrderedDict
from .utility import Utility
from .base import OperatorLayerBase
class Softmax(OperatorLayerBase):
def __init__(self, d):
marker = eval(d.argMarker[0])
mod = marker['mod']
op = marker['op']
args = marker['args']
self.marker = marker
self.mod_ = mod
self.op_ = op
s... | 2,190 | 17.887931 | 61 | py |
Emotional-Support-Conversation | Emotional-Support-Conversation-main/codes_zcj/apex/pyprof/prof/activation.py | from collections import OrderedDict
from .utility import Utility
from .base import OperatorLayerBase
class Activation(OperatorLayerBase):
"""
This class handles the various activation functions.
"""
ops = ["celu", "elu", "elu_", "hardshrink", "hardtanh", "hardtanh_", "leaky_relu", "leaky_relu_", "logsigmoid", "pr... | 1,520 | 22.045455 | 272 | py |
Emotional-Support-Conversation | Emotional-Support-Conversation-main/codes_zcj/apex/pyprof/prof/prof.py | #!/usr/bin/env python3
"""
This script reads the output (Python dictionary) created by parse.py.
For every kernel (line) in the input it determines
module / class name e.g. torch.nn.functional
operator name e.g. linear
kernel parameters e.g. GEMM M, N, K, datatype
bytes
flops
tensor core usage
direction (fprop,... | 4,967 | 18.330739 | 108 | py |
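Every operator class in this dump follows the same entry pattern, visible in the reduction.py, softmax.py, and loss.py rows: the first `argMarker` entry is a Python dict literal that `eval()` turns back into `mod`/`op`/`args`. A minimal sketch of that round trip, with made-up marker contents:

```python
# A marker as parse.py emits it: a dict literal serialized to text (example values).
marker_str = "{'mod': 'torch.nn.functional', 'op': 'linear', 'args': []}"

marker = eval(marker_str)  # the prof classes call eval() on d.argMarker[0]
mod, op, args = marker['mod'], marker['op'], marker['args']
print(mod, op, len(args))  # -> torch.nn.functional linear 0
```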
Emotional-Support-Conversation | Emotional-Support-Conversation-main/codes_zcj/apex/pyprof/prof/loss.py | from collections import OrderedDict
from .utility import Utility
from .base import OperatorLayerBase
#TODO: Add support for additional loss functions.
class MSELoss(OperatorLayerBase):
def __init__(self, d):
marker = eval(d.argMarker[0])
mod = marker['mod']
op = marker['op']
args = marker['args']
self.ma... | 1,716 | 19.2 | 78 | py |
Emotional-Support-Conversation | Emotional-Support-Conversation-main/codes_zcj/apex/pyprof/prof/index_slice_join_mutate.py | from collections import OrderedDict
from .utility import Utility
import numpy as np
from .base import OperatorLayerBase
class Cat(OperatorLayerBase):
def __init__(self, d):
marker = eval(d.argMarker[0])
mod = marker['mod']
op = marker['op']
args = marker['args']
self.marker = marker
self.mod_ = mod
se... | 8,004 | 18.059524 | 94 | py |
Emotional-Support-Conversation | Emotional-Support-Conversation-main/codes_zcj/apex/pyprof/prof/linear.py | from collections import OrderedDict
from .utility import Utility
from .base import OperatorLayerBase
class Linear(OperatorLayerBase):
'''
Notes:
If the bias occurs before the GEMM, then it's 1 write (bias expansion).
If the bias occurs after, then it's 1 read and 1 write.
bias in bprop is a reduction and hence is ... | 4,426 | 22.42328 | 133 | py |
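The notes above count the bias as 1 write before the GEMM or 1 read + 1 write after it. The GEMM itself follows standard accounting; a sketch, with element size and the M, N, K dimensions as free parameters:

```python
def linear_fprop_cost(M, N, K, elem_bytes=2, bias_after_gemm=True):
    """Standard accounting for y = x @ W.T + b (fp16 elements by default)."""
    flops = 2 * M * N * K                          # one multiply-add per product term
    nbytes = elem_bytes * (M * K + N * K + M * N)  # read x and W, write y
    # Bias: 1 read + 1 write of y if applied after the GEMM,
    # else 1 write (bias expansion), per the linear.py notes above.
    nbytes += elem_bytes * (2 * M * N if bias_after_gemm else M * N)
    return flops, nbytes
```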
Emotional-Support-Conversation | Emotional-Support-Conversation-main/codes_zcj/apex/pyprof/prof/dropout.py | from collections import OrderedDict
from .utility import Utility
from .base import OperatorLayerBase
class Dropout(OperatorLayerBase):
def __init__(self, d):
marker = eval(d.argMarker[0])
mod = marker['mod']
op = marker['op']
args = marker['args']
self.marker = marker
self.mod_ = mod
self.op_ = op
s... | 999 | 18.607843 | 59 | py |
Emotional-Support-Conversation | Emotional-Support-Conversation-main/codes_zcj/apex/pyprof/prof/conv.py | from collections import OrderedDict
from .utility import Utility
from .base import OperatorLayerBase
class Conv(OperatorLayerBase):
"""
# N = batch size
# C,H,W = input channels, height, width
# K,P,Q = output channels, height, width
# R,S = filter height, width
# g = groups
"""
#todo: refine winograd and FF... | 6,355 | 25.818565 | 292 | py |
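Using the notation the conv.py row defines (N,C,H,W input; K,P,Q output; R,S filter; g groups), the direct-convolution flop count is the standard 2·N·K·P·Q·(C/g)·R·S. A sketch; the Winograd/FFT refinement the `#todo` above mentions would change this:

```python
def conv_fprop_flops(N, C, K, P, Q, R, S, g=1):
    # Each of the N*K*P*Q outputs accumulates (C/g)*R*S products;
    # a multiply-add is counted as 2 flops.
    return 2 * N * K * P * Q * (C // g) * R * S
```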
Emotional-Support-Conversation | Emotional-Support-Conversation-main/codes_zcj/apex/pyprof/prof/blas.py | from collections import OrderedDict
from .utility import Utility
from .base import OperatorLayerBase
import numpy as np
TC_GEMMS = ["884gemm", "1688gemm"]
class Addmm(OperatorLayerBase):
def __init__(self, d):
marker = eval(d.argMarker[0])
mod = marker['mod']
op = marker['op']
args = marker['args']
self.... | 6,773 | 18.865103 | 96 | py |
Emotional-Support-Conversation | Emotional-Support-Conversation-main/codes_zcj/apex/multi_tensor_apply/multi_tensor_apply.py | import torch
class MultiTensorApply(object):
available = False
warned = False
def __init__(self, chunk_size):
try:
import amp_C
MultiTensorApply.available = True
self.chunk_size = chunk_size
except ImportError as err:
MultiTensorApply.availab... | 991 | 31 | 82 | py |
Emotional-Support-Conversation | Emotional-Support-Conversation-main/codes_zcj/apex/optimizers/fused_adagrad.py | import torch
from apex.multi_tensor_apply import multi_tensor_applier
class FusedAdagrad(torch.optim.Optimizer):
"""Implements Adagrad algorithm.
Currently GPU-only. Requires Apex to be installed via
``pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./``.
This... | 5,231 | 41.885246 | 145 | py |
Emotional-Support-Conversation | Emotional-Support-Conversation-main/codes_zcj/apex/optimizers/fused_novograd.py | import torch
from apex.multi_tensor_apply import multi_tensor_applier
class FusedNovoGrad(torch.optim.Optimizer):
"""Implements NovoGrad algorithm.
Currently GPU-only. Requires Apex to be installed via
``pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./``.
Th... | 10,116 | 46.947867 | 145 | py |
Emotional-Support-Conversation | Emotional-Support-Conversation-main/codes_zcj/apex/optimizers/fused_sgd.py | import torch
from torch.optim.optimizer import Optimizer, required
from apex.multi_tensor_apply import multi_tensor_applier
class FusedSGD(Optimizer):
r"""Implements stochastic gradient descent (optionally with momentum).
Currently GPU-only. Requires Apex to be installed via
``pip install -v --no-cache-... | 10,041 | 43.04386 | 145 | py |
Emotional-Support-Conversation | Emotional-Support-Conversation-main/codes_zcj/apex/optimizers/fused_lamb.py | import torch
from apex.multi_tensor_apply import multi_tensor_applier
class FusedLAMB(torch.optim.Optimizer):
"""Implements LAMB algorithm.
Currently GPU-only. Requires Apex to be installed via
``pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./``.
This versi... | 9,910 | 44.884259 | 145 | py |
Emotional-Support-Conversation | Emotional-Support-Conversation-main/codes_zcj/apex/optimizers/fused_adam.py | import torch
from apex.multi_tensor_apply import multi_tensor_applier
class FusedAdam(torch.optim.Optimizer):
"""Implements Adam algorithm.
Currently GPU-only. Requires Apex to be installed via
``pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./``.
This versi... | 7,661 | 43.289017 | 151 | py |
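The fused optimizers above (FusedAdagrad, FusedNovoGrad, FusedSGD, FusedLAMB, FusedAdam) all expose the standard `torch.optim` interface once Apex is installed with the `--cpp_ext`/`--cuda_ext` flags their docstrings quote. A minimal usage sketch; the model and learning rate are placeholders:

```python
import torch
from apex.optimizers import FusedAdam

model = torch.nn.Linear(16, 4).cuda()  # placeholder model; these optimizers are GPU-only
optimizer = FusedAdam(model.parameters(), lr=1e-3)

x = torch.randn(8, 16, device='cuda')
loss = model(x).sum()
optimizer.zero_grad()
loss.backward()
optimizer.step()
```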
Emotional-Support-Conversation | Emotional-Support-Conversation-main/codes_zcj/apex/contrib/sparsity/asp.py | import types
import torch
from .sparse_masklib import create_mask
torchvision_imported=True
try:
import torchvision
except ImportError:
print("[ASP][Warning] torchvision cannot be imported.")
torchvision_imported=False
def eligible_modules(model, whitelist_layer_types, allowed_layer_names, disallowed_laye... | 11,740 | 52.857798 | 193 | py |
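A minimal sketch of the ASP workflow the sparsity test rows below exercise, assuming the `prune_trained_model` classmethod from apex's sparsity module; the model and optimizer are placeholders:

```python
import torch
from apex.optimizers import FusedAdam
from apex.contrib.sparsity import ASP

model = torch.nn.Linear(64, 64).cuda()  # placeholder
optimizer = FusedAdam(model.parameters(), lr=1e-3)

# Compute 2:4 sparse masks on eligible layers and mask the weights;
# subsequent training fine-tunes with the masks enforced.
ASP.prune_trained_model(model, optimizer)
```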
Emotional-Support-Conversation | Emotional-Support-Conversation-main/codes_zcj/apex/contrib/sparsity/sparse_masklib.py | import sys
import torch
import numpy as np
import collections
from itertools import permutations
""" compute density (helper fn to compute % NNZs in a tensor) """
def fill(x):
return float(x.nonzero().size(0))/torch.numel(x)
""" reshape matrix into m-dimensional vectors: (h,w) -> (hw/m, m) """
def reshape_1d(mat... | 7,291 | 38.416216 | 103 | py |
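The `fill` helper above returns the fraction of nonzeros; for a 2:4-pruned matrix it should come out to exactly 0.5. A quick check, reusing the helper verbatim:

```python
import torch

def fill(x):
    return float(x.nonzero().size(0)) / torch.numel(x)

m = torch.tensor([[1., 0., 2., 0.],
                  [0., 3., 0., 4.]])  # 2 nonzeros in every group of 4
print(fill(m))                         # -> 0.5
```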
Emotional-Support-Conversation | Emotional-Support-Conversation-main/codes_zcj/apex/contrib/sparsity/test/checkpointing_test_reference.py | from collections import OrderedDict
import torch
from apex.optimizers import FusedAdam
from apex.contrib.sparsity import ASP
#
# Reference run for checkpointing test (part1 + part2)
#
def build_model(args):
od = OrderedDict()
for i in range(args.num_layers):
if i == 0:
od['linear_layer_%d... | 3,177 | 31.762887 | 125 | py |
Emotional-Support-Conversation | Emotional-Support-Conversation-main/codes_zcj/apex/contrib/sparsity/test/toy_problem.py | from collections import OrderedDict
import torch
from apex.optimizers import FusedAdam
from apex.contrib.sparsity import ASP
def build_model(args):
od = OrderedDict()
for i in range(args.num_layers):
if i == 0:
od['linear_layer_%d' % (i+1)] = torch.nn.Linear(args.input_features, args.hidde... | 3,217 | 35.568182 | 104 | py |
Emotional-Support-Conversation | Emotional-Support-Conversation-main/codes_zcj/apex/contrib/sparsity/test/checkpointing_test_part2.py | from collections import OrderedDict
import torch
from apex.optimizers import FusedAdam
from apex.contrib.sparsity import ASP
def build_model(args):
od = OrderedDict()
for i in range(args.num_layers):
if i == 0:
od['linear_layer_%d' % (i+1)] = torch.nn.Linear(args.input_features, args.hidde... | 3,131 | 38.15 | 151 | py |
Emotional-Support-Conversation | Emotional-Support-Conversation-main/codes_zcj/apex/contrib/sparsity/test/checkpointing_test_part1.py | from collections import OrderedDict
import torch
from apex.optimizers import FusedAdam
from apex.contrib.sparsity import ASP
def build_model(args):
od = OrderedDict()
for i in range(args.num_layers):
if i == 0:
od['linear_layer_%d' % (i+1)] = torch.nn.Linear(args.input_features, args.hidde... | 3,353 | 34.305263 | 151 | py |
Emotional-Support-Conversation | Emotional-Support-Conversation-main/codes_zcj/apex/contrib/groupbn/batch_norm.py | import torch
import numpy as np
from torch.nn.modules.batchnorm import _BatchNorm
import bnp
class bn_NHWC_impl(torch.autograd.Function):
@staticmethod
def forward(ctx, x, s, b, rm, riv, mini_m, mini_riv, ret_cta, mom, epsilon, fuse_relu, is_train, bn_group, my_data, pair_data, magic, pair_data2, pair_data3, ... | 11,208 | 48.597345 | 229 | py |
Emotional-Support-Conversation | Emotional-Support-Conversation-main/codes_zcj/apex/contrib/groupbn/__init__.py | try:
import torch
import bnp
from .batch_norm import BatchNorm2d_NHWC
del torch
del bnp
del batch_norm
except ImportError as err:
print("apex was installed without --bnp flag, contrib.groupbn is not available")
| 239 | 23 | 84 | py |
Emotional-Support-Conversation | Emotional-Support-Conversation-main/codes_zcj/apex/contrib/examples/multihead_attn/func_test_multihead_attn.py | import torch
import torch.nn.functional as F
import argparse
from apex.contrib.multihead_attn import SelfMultiheadAttn
from apex.contrib.multihead_attn import EncdecMultiheadAttn
parser = argparse.ArgumentParser(description='Multihead Attention Standalone Test')
parser.add_argument('--seq-length', default=64, type=in... | 5,740 | 51.669725 | 164 | py |
Emotional-Support-Conversation | Emotional-Support-Conversation-main/codes_zcj/apex/contrib/examples/multihead_attn/perf_test_multihead_attn.py | import torch
import torch.nn.functional as F
import argparse
from apex.contrib.multihead_attn import SelfMultiheadAttn
from apex.contrib.multihead_attn import EncdecMultiheadAttn
parser = argparse.ArgumentParser(description='Multihead Attention Standalone Test')
parser.add_argument('--seq-length', default=64, type=in... | 6,163 | 52.137931 | 157 | py |
Emotional-Support-Conversation | Emotional-Support-Conversation-main/codes_zcj/apex/contrib/test/test_label_smoothing.py | import torch
from apex.contrib import xentropy as label_smoothing
import unittest
import warnings
import random
import numpy as np
import time
def label_smoothing_raw(x, target, padding_idx, smoothing):
logprobs = torch.nn.functional.log_softmax(x, dim=-1, dtype=torch.float32)
non_pad_mask = (target != paddi... | 4,800 | 36.217054 | 85 | py |
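`label_smoothing_raw` above starts from `log_softmax` and a pad mask; the standard smoothed loss it builds is (1 − ε)·NLL + ε·(mean negative log-probability). A reference sketch consistent with those first two visible lines; names beyond the visible ones are mine:

```python
import torch

def label_smoothing_ref(x, target, padding_idx, smoothing):
    logprobs = torch.nn.functional.log_softmax(x, dim=-1, dtype=torch.float32)
    non_pad_mask = (target != padding_idx)
    nll = -logprobs.gather(dim=-1, index=target.unsqueeze(1)).squeeze(1)
    smooth = -logprobs.mean(dim=-1)  # expected loss under uniform labels
    loss = (1.0 - smoothing) * nll + smoothing * smooth
    return loss[non_pad_mask].sum()
```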
Emotional-Support-Conversation | Emotional-Support-Conversation-main/codes_zcj/apex/contrib/test/multihead_attn/test_encdec_multihead_attn_norm_add.py | import torch
import unittest
from apex.contrib.multihead_attn import EncdecMultiheadAttn
class EncdecMultiheadAttnNormAddTest(unittest.TestCase):
def setUp(self, seed=1234):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
self.seq_length = 80
self.sequences = 10
... | 3,875 | 48.692308 | 110 | py |
Emotional-Support-Conversation | Emotional-Support-Conversation-main/codes_zcj/apex/contrib/test/multihead_attn/test_self_multihead_attn.py | import torch
import unittest
from apex.contrib.multihead_attn import SelfMultiheadAttn
class SelfMultiheadAttnTest(unittest.TestCase):
def setUp(self, seed=1234):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
self.seq_length = 80
self.sequences = 10
self.h... | 6,569 | 49.152672 | 147 | py |
Emotional-Support-Conversation | Emotional-Support-Conversation-main/codes_zcj/apex/contrib/test/multihead_attn/test_encdec_multihead_attn.py | import torch
import unittest
from apex.contrib.multihead_attn import EncdecMultiheadAttn
class EncdecMultiheadAttnTest(unittest.TestCase):
def setUp(self, seed=1234):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
self.seq_length = 80
self.sequences = 10
se... | 7,429 | 53.233577 | 152 | py |
Emotional-Support-Conversation | Emotional-Support-Conversation-main/codes_zcj/apex/contrib/test/multihead_attn/test_self_multihead_attn_norm_add.py | import torch
import unittest
from apex.contrib.multihead_attn import SelfMultiheadAttn
class SelfMultiheadAttnNormAddTest(unittest.TestCase):
def setUp(self, seed=1234):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
self.seq_length = 80
self.sequences = 10
... | 3,305 | 44.287671 | 108 | py |
Emotional-Support-Conversation | Emotional-Support-Conversation-main/codes_zcj/apex/contrib/test/multihead_attn/test_mha_fused_softmax.py | import torch
import unittest
import torch.nn.functional as F
from apex.contrib.multihead_attn import fast_mask_softmax_dropout_func
class FusedSoftmaxTest(unittest.TestCase):
def setUp(self, seed=1234):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
self.seq_length = 80
... | 1,800 | 40.883721 | 111 | py |
Emotional-Support-Conversation | Emotional-Support-Conversation-main/codes_zcj/apex/contrib/xentropy/softmax_xentropy.py | import torch
import xentropy_cuda
class SoftmaxCrossEntropyLoss(torch.autograd.Function):
@staticmethod
def forward(ctx, logits, labels, smoothing=0.0, padding_idx=0, half_to_float=False):
losses, max_log_sum_exp = xentropy_cuda.forward(
logits, labels, smoothing, half_to_float)
los... | 1,023 | 34.310345 | 88 | py |
Emotional-Support-Conversation | Emotional-Support-Conversation-main/codes_zcj/apex/contrib/xentropy/__init__.py | try:
import torch
import xentropy_cuda
from .softmax_xentropy import SoftmaxCrossEntropyLoss
del torch
del xentropy_cuda
del softmax_xentropy
except ImportError as err:
print("apex was installed without --xentropy flag, contrib.xentropy is not available")
| 284 | 27.5 | 90 | py |
Emotional-Support-Conversation | Emotional-Support-Conversation-main/codes_zcj/apex/contrib/multihead_attn/fast_encdec_multihead_attn_func.py | import torch
import fast_encdec_multihead_attn
class FastEncdecAttnFunc(torch.autograd.Function):
@staticmethod
def forward(ctx, use_time_mask, is_training, heads, inputs_q, inputs_kv, input_weights_q, input_weights_kv, output_weights, pad_mask, dropout_prob):
heads_t = torch.tensor([heads])
... | 5,447 | 60.213483 | 152 | py |
Emotional-Support-Conversation | Emotional-Support-Conversation-main/codes_zcj/apex/contrib/multihead_attn/fast_self_multihead_attn_norm_add_func.py | import torch
import fast_self_multihead_attn_norm_add
class FastSelfAttnNormAddFunc(torch.autograd.Function):
@staticmethod
def forward(ctx, use_time_mask, is_training, heads, inputs, lyr_nrm_gamma_weights, lyr_nrm_beta_weights, input_weights, output_weights, pad_mask, dropout_prob):
heads_t = ... | 6,704 | 61.663551 | 164 | py |
Emotional-Support-Conversation | Emotional-Support-Conversation-main/codes_zcj/apex/contrib/multihead_attn/fast_self_multihead_attn_func.py | import torch
import fast_self_multihead_attn
import fast_self_multihead_attn_bias
import fast_self_multihead_attn_bias_additive_mask
class FastSelfAttnFunc(torch.autograd.Function) :
@staticmethod
def forward(ctx, use_time_mask, is_training, heads, inputs, input_weights, output_weights, input_biases, output_bi... | 9,331 | 64.258741 | 163 | py |
Emotional-Support-Conversation | Emotional-Support-Conversation-main/codes_zcj/apex/contrib/multihead_attn/fast_encdec_multihead_attn_norm_add_func.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import torch
import fast_encdec_mu... | 8,251 | 61.992366 | 197 | py |
Emotional-Support-Conversation | Emotional-Support-Conversation-main/codes_zcj/apex/contrib/multihead_attn/self_multihead_attn.py | import math
import torch
from torch import nn
from torch.nn import Parameter
import torch.nn.functional as F
from .self_multihead_attn_func import self_attn_func
from .fast_self_multihead_attn_func import fast_self_attn_func
from .fast_self_multihead_attn_norm_add_func import fast_self_attn_nor... | 9,054 | 49.586592 | 301 | py |
Emotional-Support-Conversation | Emotional-Support-Conversation-main/codes_zcj/apex/contrib/multihead_attn/encdec_multihead_attn_func.py | import torch
import torch.nn.functional as F
class EncdecAttnFunc(torch.autograd.Function):
@staticmethod
def forward(ctx, use_time_mask, is_training, heads, scale, inputs_q, inputs_kv,
input_weights_q, input_weights_kv, output_weights,
input_biases_q, input_biases_kv, output_b... | 17,587 | 64.3829 | 178 | py |
Emotional-Support-Conversation | Emotional-Support-Conversation-main/codes_zcj/apex/contrib/multihead_attn/mask_softmax_dropout_func.py | import torch
import fast_mask_softmax_dropout
import fast_additive_mask_softmax_dropout
class MaskSoftmaxDropout(torch.autograd.Function) :
@staticmethod
def forward(ctx, is_training, heads, inputs, pad_mask, mask_additive, dropout_prob):
heads_t = torch.tensor([heads])
dropout_prob_t =... | 4,603 | 55.146341 | 91 | py |
Emotional-Support-Conversation | Emotional-Support-Conversation-main/codes_zcj/apex/contrib/multihead_attn/encdec_multihead_attn.py | import math
import torch
from torch import nn
from torch.nn import Parameter
import torch.nn.functional as F
from .encdec_multihead_attn_func import encdec_attn_func
from .fast_encdec_multihead_attn_func import fast_encdec_attn_func
from .fast_encdec_multihead_attn_norm_add_func import fast_enc... | 7,043 | 48.605634 | 129 | py |
Emotional-Support-Conversation | Emotional-Support-Conversation-main/codes_zcj/apex/contrib/multihead_attn/self_multihead_attn_func.py | import torch
import torch.nn.functional as F
class SelfAttnFunc(torch.autograd.Function):
@staticmethod
def forward(ctx, use_time_mask, is_training, heads, scale, inputs,
input_weights, output_weights,
input_biases, output_biases,
mask, is_additive_mask, dropout_... | 14,741 | 61.466102 | 178 | py |
Emotional-Support-Conversation | Emotional-Support-Conversation-main/codes_zcj/apex/contrib/optimizers/distributed_fused_adam_v2.py | import math
import torch
import importlib
import amp_C
from apex.multi_tensor_apply import multi_tensor_applier
class DistributedFusedAdamV2(torch.optim.Optimizer):
"""Implements Adam algorithm. Currently GPU-only. Requires Apex to be installed via
``python setup.py install --cuda_ext --cpp_ext``.
It ha... | 31,780 | 50.592532 | 282 | py |
Emotional-Support-Conversation | Emotional-Support-Conversation-main/codes_zcj/apex/contrib/optimizers/distributed_fused_adam.py | import math
import torch
import importlib
import amp_C
from apex.multi_tensor_apply import multi_tensor_applier
class DistributedFusedAdam(torch.optim.Optimizer):
"""Implements Adam algorithm. Currently GPU-only. Requires Apex to be installed via
``python setup.py install --cuda_ext --cpp_ext``.
It has ... | 29,544 | 51.292035 | 282 | py |
Emotional-Support-Conversation | Emotional-Support-Conversation-main/codes_zcj/apex/contrib/optimizers/fp16_optimizer.py | import torch
from apex.multi_tensor_apply import multi_tensor_applier
class FP16_Optimizer(object):
"""
:class:`FP16_Optimizer` A cutdown version of apex.fp16_utils.FP16_Optimizer.
Designed only to wrap apex.contrib.optimizers.FusedAdam, FusedSGD.
Refer to apex.fp16_utils documents for more information... | 10,448 | 41.82377 | 126 | py |
Emotional-Support-Conversation | Emotional-Support-Conversation-main/codes_zcj/apex/contrib/optimizers/distributed_fused_adam_v3.py | import math
import torch
import importlib
import amp_C
from apex.multi_tensor_apply import multi_tensor_applier
class DistributedFusedAdamV3(torch.optim.Optimizer):
"""Implements Adam algorithm. Currently GPU-only. Requires Apex to be installed via
``python setup.py install --cuda_ext --cpp_ext``.
It ha... | 15,709 | 47.190184 | 244 | py |
Emotional-Support-Conversation | Emotional-Support-Conversation-main/codes_zcj/apex/contrib/optimizers/fused_sgd.py | import types
import torch
from torch.optim.optimizer import Optimizer, required
from apex.multi_tensor_apply import multi_tensor_applier
class FusedSGD(Optimizer):
r"""Implements stochastic gradient descent (optionally with momentum).
This version of fused SGD implements 2 fusions.
* Fusion of the SGD ... | 9,468 | 43.665094 | 145 | py |
Emotional-Support-Conversation | Emotional-Support-Conversation-main/codes_zcj/apex/contrib/optimizers/fused_lamb.py | import torch
import importlib
import math
from apex.multi_tensor_apply import multi_tensor_applier
class FusedLAMB(torch.optim.Optimizer):
"""Implements LAMB algorithm.
Currently GPU-only. Requires Apex to be installed via
``pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cu... | 9,408 | 44.019139 | 145 | py |
Emotional-Support-Conversation | Emotional-Support-Conversation-main/codes_zcj/apex/contrib/optimizers/fused_adam.py | import types
import torch
import importlib
from apex.multi_tensor_apply import multi_tensor_applier
class FusedAdam(torch.optim.Optimizer):
"""Implements Adam algorithm. Currently GPU-only. Requires Apex to be installed via
``python setup.py install --cuda_ext --cpp_ext``.
It has been proposed in `Adam:... | 9,284 | 43.855072 | 145 | py |
Emotional-Support-Conversation | Emotional-Support-Conversation-main/codes_zcj/apex/contrib/optimizers/distributed_fused_lamb.py | import math
import torch
import importlib
import amp_C
from apex.multi_tensor_apply import multi_tensor_applier
class DistributedFusedLAMB(torch.optim.Optimizer):
"""Implements LAMB algorithm.
Currently GPU-only. Requires Apex to be installed via
``pip install -v --no-cache-dir --global-option="--cpp_ex... | 34,890 | 56.386513 | 282 | py |
Emotional-Support-Conversation | Emotional-Support-Conversation-main/codes_zcj/apex/reparameterization/reparameterization.py | import torch
from torch.nn.parameter import Parameter
import sys
class Reparameterization(object):
"""
Class interface for performing weight reparameterizations
Arguments:
name (str): name of weight parameter
dim (int): dimension over which to compute the norm
module (nn.Module): par... | 6,291 | 40.394737 | 127 | py |
Emotional-Support-Conversation | Emotional-Support-Conversation-main/codes_zcj/apex/reparameterization/__init__.py | from .weight_norm import WeightNorm
from .reparameterization import Reparameterization
def apply_weight_norm(module, name='', dim=0, hook_child=True):
r"""
Applies weight normalization to a parameter in the given module.
If no parameter is provided, applies weight normalization to all
parameters in mod... | 5,374 | 40.992188 | 106 | py |
Emotional-Support-Conversation | Emotional-Support-Conversation-main/codes_zcj/apex/reparameterization/weight_norm.py | import torch
from torch.nn.parameter import Parameter
from ..fp16_utils import Fused_Weight_Norm
import time
from .reparameterization import Reparameterization
def _norm(p, dim):
"""Computes the norm over all dimensions except dim"""
if dim is None:
return p.norm()
elif dim == 0:
output_si... | 3,203 | 39.556962 | 84 | py |
Emotional-Support-Conversation | Emotional-Support-Conversation-main/codes_zcj/apex/mlp/mlp.py | from copy import copy
import math
import torch
from torch import nn
import mlp_cuda
from .. import amp
class MlpFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, bias, activation, *args):
output = mlp_cuda.forward(bias, activation, args)
ctx.save_for_backward(*args)
ctx.... | 2,614 | 31.6875 | 115 | py |
Emotional-Support-Conversation | Emotional-Support-Conversation-main/codes_zcj/utils/building_utils.py | # coding=utf-8
import json
import os
import logging
import torch
from os.path import join
from models import models
from transformers import (AutoTokenizer, AutoModel, AutoConfig)
from torch.distributed import get_rank
logger = logging.getLogger(__name__)
def boolean_string(s):
if s.lower() not in {'false', 't... | 4,829 | 33.255319 | 108 | py |
Emotional-Support-Conversation | Emotional-Support-Conversation-main/codes_zcj/utils/distributed.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
""" Pytorch Distributed utils
# NOTE: copied from OpenNMT-py
This piece of code was heavily inspired by the equivalent of Fairseq-py
https://github.com/pytorch/fairseq
"""
import math
import pickle
import torch.distributed
def i... | 3,995 | 32.024793 | 77 | py |
Emotional-Support-Conversation | Emotional-Support-Conversation-main/codes_zcj/utils/eval_utils.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import torch
import logging
from torch import Tensor
import numpy as np
from collections import defaultdict
logger = logging.getLogger(__name__)
def cal_entropy(generated):
etp_score = [0.0, 0.0, 0.0, 0.0]
div_score = [0.0, 0.0, 0.0... | 2,328 | 36.564516 | 100 | py |
aae-recommender | aae-recommender-master/setup.py | from setuptools import setup
requirements = [
'numpy>=1.16.5',
'scipy',
'sklearn',
'torch',
'gensim',
'pandas',
'joblib',
'matplotlib',
'docutils',
# 'seaborn',
]
setup(name='aaerec',
version=0.1,
description='Multi-modal Adversarial Autoencoders a... | 465 | 20.181818 | 80 | py |
aae-recommender | aae-recommender-master/aaerec/condition.py | import torch
import torch.nn as nn
from docutils.nodes import inline
from torch import optim
from abc import ABC, abstractmethod
from collections import OrderedDict, Counter
import itertools as it
import torch
import scipy.sparse as sp
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
... | 21,271 | 34.218543 | 112 | py |
aae-recommender | aae-recommender-master/aaerec/dae.py | """ Denoising Autoencoders """
# CFR https://gist.github.com/bigsnarfdude/dde651f6e06f266b48bc3750ac730f80,
# https://github.com/GunhoChoi/Kind-PyTorch-Tutorial/tree/master/07_Denoising_Autoencoder
# torch
import torch
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import argparse
fr... | 14,825 | 32.926773 | 103 | py |
aae-recommender | aae-recommender-master/aaerec/aae.py | """ Adversarially Regualized Autoencoders """
# torch
import torch
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import argparse
from torch.autograd import Variable
# sklearn
import sklearn
from .ub import AutoEncoderMixin
# numpy
import numpy as np
import scipy.sparse as sp
# own... | 38,869 | 36.737864 | 177 | py |
aae-recommender | aae-recommender-master/aaerec/vae.py | from __future__ import print_function
import argparse
import torch
import torch.utils.data
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
import sklearn
from gensim.models.keyedvectors import KeyedVectors
import numpy as np
import scipy.sparse as... | 13,725 | 33.487437 | 103 | py |
aae-recommender | aae-recommender-master/aaerec/transforms.py | #!/usr/bin/env python
""" This module contains transforming functions """
from functools import reduce
import scipy.sparse as sp
import numpy as np
def star(fn):
"""
Allows other functions to deal with multiple arguments.
>>> f =lambda x: x + 1
>>> f(0)
1
>>> star(f)(1,2)
(2, 3)
"""
... | 6,312 | 25.75 | 79 | py |
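The doctest above pins down `star`'s behavior: `star(f)(1, 2) == (2, 3)`, i.e. `f` mapped over the packed positional arguments. A one-line body consistent with the doctest (the actual body is truncated in this dump):

```python
def star(fn):
    """Apply fn to each positional argument, returning a tuple."""
    return lambda *args: tuple(map(fn, args))

assert star(lambda x: x + 1)(1, 2) == (2, 3)
```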
aae-recommender | aae-recommender-master/tests/test_condition.py | """ Tests various functionalities wrt conditions """
import pytest
import torch
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.utils import shuffle
from aaerec.condition import EmbeddingBagCondition,\
PretrainedWordEmbeddingCondition,\
ConditionBase,\
Concatenati... | 10,284 | 30.072508 | 84 | py |
aae-recommender | aae-recommender-master/irgan/utils.py | import linecache
from encodings.punycode import selective_find
import numpy as np
import torch.nn as nn
import torch
class L2Loss(nn.Module):
def __init__(self):
super(L2Loss, self).__init__()
self.loss = nn.MSELoss()
if torch.cuda.is_available():
self.loss = self.loss.cuda()
... | 3,203 | 24.03125 | 75 | py |
aae-recommender | aae-recommender-master/irgan/cf_gan.py | from irgan.dis_model import Discriminator
from irgan.gen_model import Generator
import numpy as np
import irgan.utils as ut
import multiprocessing
import argparse
import collections
import torch
# own recommender stuff
from aaerec.base import Recommender
from aaerec.datasets import Bags
from aaerec.evaluation import ... | 14,066 | 38.963068 | 118 | py |
aae-recommender | aae-recommender-master/irgan/gen_model.py | import torch.nn as nn
import torch.nn.functional as F
import torch
from torch.autograd import Variable
class Generator(nn.Module):
def __init__(self, itemNum, userNum, emb_dim, lamda, param=None, initdelta=0.05, learning_rate=0.05,
conditions=None):
super(Generator, self).__init__()
... | 3,397 | 42.012658 | 111 | py |
aae-recommender | aae-recommender-master/irgan/dis_model.py | import torch.nn as nn
import torch.nn.functional as F
import torch
from torch.autograd import Variable
from .utils import L2Loss
class Discriminator(nn.Module):
def __init__(self, itemNum, userNum, emb_dim, lamda, param=None, initdelta=0.05, learning_rate=0.05,
conditions=None):
super(Dis... | 3,785 | 45.170732 | 133 | py |
CaloScore | CaloScore-main/scripts/WGAN.py | import numpy as np
import os,re
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, Input
import time
from tensorflow.keras.callbacks import ReduceLROnPlateau,EarlyStopping
import horovod.tensorflow.keras as hvd
import argparse
# import tensorflow_addons as tfa
#tf.random.set_seed... | 12,320 | 40.625 | 124 | py |
CaloScore | CaloScore-main/scripts/score_matching.py | import numpy as np
import os
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.callbacks import ReduceLROnPlateau,EarlyStopping
import horovod.tensorflow.keras as hvd
import argparse
import h5py as h5
import utils
from CaloScore import CaloScore
if __name__ == '__main__':
hvd.init()
g... | 4,508 | 41.140187 | 126 | py |
CaloScore | CaloScore-main/scripts/plot_caloscore.py | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
from matplotlib import gridspec
import argparse
import h5py as h5
import os
import utils
import tensorflow as tf
import horovod.tensorflow.keras as hvd
from CaloScore import CaloScore
from WGAN import WGAN
import time
hvd.init()
gpus... | 18,481 | 41.487356 | 154 | py |
CaloScore | CaloScore-main/scripts/utils.py | import json, yaml
import os
import h5py as h5
import horovod.tensorflow.keras as hvd
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from matplotlib import gridspec
import matplotlib.ticker as mtick
def split_data(data,nevts,frac=0.8):
data = data.shuffle(nevts)
train_data = data.tak... | 13,259 | 31.579853 | 151 | py |
CaloScore | CaloScore-main/scripts/CaloScore.py | import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, Input
import time
import tensorflow.keras.backend as K
import horovod.tensorflow.keras as hvd
import horovod.tensorflow as hvdtf
import utils
# tf and friends
tf.random.set_seed(1234)
class CaloScore(keras.Mo... | 17,840 | 42.198547 | 119 | py |
CaloScore | CaloScore-main/scripts/train.py | import numpy as np
import os
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.callbacks import ReduceLROnPlateau,EarlyStopping,ModelCheckpoint
import horovod.tensorflow.keras as hvd
import argparse
import h5py as h5
import utils
from CaloScore import CaloScore
from WGAN import WGAN
if __name_... | 5,755 | 40.410072 | 130 | py |
Ward2ICU | Ward2ICU-master/run-experiment.py | import mlflow
import tempfile
import click
import torch
import torch.nn as nn
import numpy as np
from ward2icu.data import TimeSeriesVitalSigns
from ward2icu.logs import log_avg_loss, log_avg_grad, log_model, log_df
from ward2icu.models import CNNCGANGenerator, CNNCGANDiscriminator
from ward2icu.utils import synthesis_... | 4,547 | 34.255814 | 99 | py |
Ward2ICU | Ward2ICU-master/ward2icu/trainers.py | """
References:
- https://github.com/eriklindernoren/PyTorch-GAN/blob/master/implementations/gan/gan.py
"""
import torch
import numpy as np
import torchgan
from torch.nn import BCELoss, BCEWithLogitsLoss
from ward2icu import make_logger
from sklearn.metrics import balanced_accuracy_score, matthews_corrcoef
logger ... | 3,773 | 40.021739 | 91 | py |
Ward2ICU | Ward2ICU-master/ward2icu/utils.py | import torch
import numpy as np
from math import floor
from ward2icu import make_logger
logger = make_logger(__file__)
def tile(t, length):
''' Creates an extra dimension on the tensor t and
repeats it throughout.'''
return t.view(-1, 1).repeat(1, length)
def calc_conv_output_length(conv_layer,
... | 907 | 22.894737 | 54 | py |
Ward2ICU | Ward2ICU-master/ward2icu/layers.py | import torch.nn as nn
import torch
from ward2icu.utils import calc_conv_output_length, tile
def rnn_layer(input_size,
hidden_size=None,
num_layers=1,
dropout=0.5,
rnn_type='rnn',
nonlinearity='relu'):
# Set hidden_size to input_size if not... | 3,156 | 26.692982 | 75 | py |
Ward2ICU | Ward2ICU-master/ward2icu/metrics.py | from ward2icu.samplers import BinaryBalancedSampler, IdentitySampler
from ward2icu.models import BinaryRNNClassifier
from torch import optim
from ward2icu.utils import (train_test_split_tensor,
numpy_to_cuda, tile)
from ward2icu.trainers import BinaryClassificationTrainer
from ward2icu impor... | 3,283 | 37.635294 | 77 | py |
Ward2ICU | Ward2ICU-master/ward2icu/__init__.py | import logging
import os
import numpy as np
import torch
from pathlib import Path
def get_project_root() -> Path:
"""Returns project root folder."""
return Path(__file__).parent.parent
def get_data_dir() -> Path:
return get_project_root() / 'data'
def make_logger(file_: str = "NO_FILE") ->... | 642 | 22.814815 | 64 | py |
Ward2ICU | Ward2ICU-master/ward2icu/samplers.py | import torch
from ward2icu.utils import tile as tile_func
class IdentitySampler:
def __init__(self, X, y, tile=False):
self.X = X
self.y = tile_func(y, X.shape[1]) if tile else y
self.tile = tile
self.device = X.device
def sample(self):
return self.X, self.y
def _... | 2,600 | 32.779221 | 71 | py |
Ward2ICU | Ward2ICU-master/ward2icu/models/rgan.py | '''
Reference: https://arxiv.org/abs/1706.02633
'''
import torch
import torch.nn as nn
from torchgan.models import Generator, Discriminator
from ward2icu.layers import rnn_layer
class RGANGenerator(Generator):
def __init__(self,
sequence_length,
output_size,
hi... | 7,733 | 39.072539 | 82 | py |
Ward2ICU | Ward2ICU-master/ward2icu/models/cnngan.py | '''
Reference: https://arxiv.org/abs/1806.01875
'''
import torch
import torch.nn as nn
from torch.nn import (Linear,
Conv1d,
MaxPool1d,
AvgPool1d,
Upsample,
ReplicationPad1d,
LeakyReLU,
... | 6,188 | 31.573684 | 78 | py |
Ward2ICU | Ward2ICU-master/ward2icu/models/classifiers.py | '''
Reference: https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
from ward2icu.layers import rnn_layer, Conv1dLayers
from ward2icu.utils import calc_conv_output_length, flatten
class BinaryRNNClassifier(nn.Module):
def __init... | 3,442 | 32.427184 | 88 | py |
Ward2ICU | Ward2ICU-master/ward2icu/models/rcgan.py | import torch
import torch.nn as nn
from ward2icu.models import RGANGenerator, RGANDiscriminator
from ward2icu.utils import tile
class RCGANGenerator(RGANGenerator):
def __init__(self,
sequence_length,
output_size,
num_classes,
noise_size,
... | 5,087 | 39.380952 | 83 | py |
Ward2ICU | Ward2ICU-master/tests/test_samplers.py | import torch
from sybric.samplers import BinaryBalancedSampler, IdentitySampler
def test_BinaryBalancedSampler():
X = torch.eye(5)
y = torch.Tensor([0, 0, 0, 1, 1])
sampler = BinaryBalancedSampler(X, y)
for _ in range(100):
X_s, y_s = sampler.sample()
assert (y_s == torch.Tensor([0, 0... | 1,316 | 31.121951 | 66 | py |
Ward2ICU | Ward2ICU-master/tests/test_trainers.py | import pytest
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from torch.optim import SGD
from torch.utils.data import DataLoader, Dataset
from sybric.trainers import (BinaryClassificationTrainer,
MinMaxBinaryCGANTrainer)
from sybric.samplers import Ide... | 8,605 | 37.765766 | 88 | py |
Ward2ICU | Ward2ICU-master/tests/test_models.py | import torch
import numpy as np
import pytest
from torchgan.losses import MinimaxDiscriminatorLoss
from sybric.models import (RGANGenerator,
RGANDiscriminator,
RCGANGenerator,
RCGANDiscriminator,
BinaryRNNClass... | 5,003 | 28.435294 | 73 | py |
Ward2ICU | Ward2ICU-master/tests/test_utils.py | from torch import Tensor
from sybric.utils import tile
def test_tile():
y = Tensor([0, 1, 2])
y_tiled = tile(y, 3)
expected = Tensor([[0, 0, 0],
[1, 1, 1],
[2, 2, 2]])
assert (y_tiled == expected).all()
| 268 | 21.416667 | 38 | py |
BPnP | BPnP-master/demoCamCali.py | from __future__ import print_function, division
import torch
import numpy as np
import BPnP
import matplotlib.pyplot as plt
import kornia as kn
from scipy.io import savemat, loadmat
device = 'cuda'
cube = loadmat('demo_data/cube.mat')
pts3d_gt = torch.tensor(cube['pts3d'], device=device, dtype=torch.float)
n = pts3d_... | 2,206 | 26.5875 | 114 | py |
BPnP | BPnP-master/demoSfM.py | import torch
import numpy as np
import BPnP
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import torchvision
from scipy.io import loadmat, savemat
device = 'cuda'
pl = 0.00000586
f = 0.0005
u = 0
v = 0
K = torch.tensor(
[[f, 0, u],
[0, f, v],
[0, 0, 1]], dtype=torch.float, devi... | 2,773 | 25.419048 | 154 | py |
BPnP | BPnP-master/BPnP.py | import torch
import cv2 as cv
import numpy as np
import kornia as kn
class BPnP(torch.autograd.Function):
"""
Back-propagatable PnP
INPUTS:
pts2d - the 2D keypoints coordinates of size [batch_size, num_keypoints, 2]
pts3d - the 3D keypoints coordinates of size [num_keypoints, 3]
K - the cam... | 15,120 | 42.079772 | 205 | py |
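The BPnP docstring spells out the input shapes, and since it is a `torch.autograd.Function` it is invoked through `.apply`. A minimal sketch; the shapes follow the docstring, the intrinsics are a placeholder, and the returned pose format is not shown in the truncated row:

```python
import torch
# from BPnP import BPnP  # the autograd.Function defined in the row above

batch_size, n = 2, 8
pts2d = torch.rand(batch_size, n, 2, requires_grad=True)  # [batch_size, num_keypoints, 2]
pts3d = torch.rand(n, 3)                                   # [num_keypoints, 3]
K = torch.eye(3)                                           # placeholder camera intrinsics

# poses = BPnP.apply(pts2d, pts3d, K)  # differentiable w.r.t. pts2d
# poses.sum().backward()               # gradients flow back through the PnP solve
```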
BPnP | BPnP-master/demoPoseEst.py | import torch
import numpy as np
import BPnP
import matplotlib.pyplot as plt
import torchvision
from scipy.io import loadmat, savemat
import kornia as kn
device = 'cuda'
cube = loadmat('demo_data/cube.mat')
pts3d_gt = torch.tensor(cube['pts3d'], device=device, dtype=torch.float)
n = pts3d_gt.size(0)
poses = loadmat('d... | 3,403 | 25.59375 | 156 | py |
PMNet-PMNet | PMNet-PMNet/network/PMNet.py | from __future__ import absolute_import, print_function
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
# from torchvision import models
try:
from encoding.nn import SyncBatchNorm
_BATCH_NORM = SyncBatchNorm
except:
_BATCH_NORM = nn.BatchNorm2d
_BOT... | 7,375 | 30.793103 | 90 | py |
PMNet-PMNet | PMNet-PMNet/network/RadioWnet.py | import torch
import torch.nn as nn
from torchvision import models
# Encoder building block (used in decoder as well)
def convrelu(in_channels, out_channels, kernel, padding, pool):
return nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel, padding=padding),
nn.ReLU(inplace=True),
nn... | 10,392 | 43.991342 | 102 | py |
PMNet-PMNet | PMNet-PMNet/network/UNet.py | import torch
import torch.nn as nn
import torch.nn.functional as F
# The basic building block of Unet
class DoubleConv(nn.Module):
def __init__(self, in_channels, out_channels, mid_channels=None):
super().__init__()
if not mid_channels:
mid_channels = out_channels
self.double_co... | 3,179 | 31.121212 | 96 | py |