repo (string, 1–99 chars) | file (string, 13–215 chars) | code (string, 12–59.2M chars) | file_length (int64, 12–59.2M) | avg_line_length (float64, 3.82–1.48M) | max_line_length (int64, 12–2.51M) | extension_type (string, 1 class) |
|---|---|---|---|---|---|---|
UnifiedSKG | UnifiedSKG-main/seq2seq_construction/hybridqa.py | import copy
import os
import torch
from datasets import DatasetDict
from torch.utils.data import Dataset
from torch.utils.data.dataset import T_co
from transformers import AutoTokenizer
from utils.processor import get_default_processor
from tqdm import tqdm
class Constructor(object):
def __init__(self, args):
... | 5,678 | 44.432 | 131 | py |
UnifiedSKG | UnifiedSKG-main/seq2seq_construction/mmqa.py | import copy
import os
import torch
from datasets import DatasetDict
from torch.utils.data import Dataset
from torch.utils.data.dataset import T_co
from transformers import AutoTokenizer
from tqdm import tqdm
from utils.processor import get_default_processor
class Constructor(object):
def __init__(self, args):... | 6,853 | 45.62585 | 131 | py |
UnifiedSKG | UnifiedSKG-main/seq2seq_construction/grailqa.py | import os
import torch
from datasets import DatasetDict
from torch.utils.data import Dataset
from torch.utils.data.dataset import T_co
import numpy as np
import copy
from tqdm import tqdm
class Constructor(object):
def __init__(self, args):
self.args = args
def to_seq2seq(self, raw_datasets: DatasetD... | 7,085 | 35.338462 | 107 | py |
UnifiedSKG | UnifiedSKG-main/models/prompt/modeling_t5.py | # coding=utf-8
# Copyright 2018 Mesh TensorFlow authors, T5 Authors and HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2... | 84,860 | 44.599678 | 213 | py |
UnifiedSKG | UnifiedSKG-main/models/prompt/modeling_bart.py | # coding=utf-8
# Copyright 2021 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/... | 68,276 | 46.250519 | 159 | py |
UnifiedSKG | UnifiedSKG-main/models/adapter/adapter.py | import math
import torch
from torch import nn
class Activation_Function_Class(nn.Module):
"""
Implementation of various activation function.
"""
def __init__(self, hidden_act):
if hidden_act.lower() == "relu":
self.f = nn.functional.relu
elif hidden_act.lower() == "tanh"... | 3,823 | 30.344262 | 119 | py |
UnifiedSKG | UnifiedSKG-main/models/adapter/modeling_t5.py | # coding=utf-8
# Copyright 2018 Mesh TensorFlow authors, T5 Authors and HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2... | 85,211 | 44.519231 | 213 | py |
UnifiedSKG | UnifiedSKG-main/models/unified/base.py | import os
import torch
from torch import nn
from transformers.modeling_utils import (
ModuleUtilsMixin, PushToHubMixin,
logging, Union, Optional, Callable, unwrap_model, get_parameter_dtype,
FLAX_WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME, WEIGHTS_NAME,
is_offline_mode, is_remote_url, hf_bucket_ur... | 11,446 | 48.9869 | 186 | py |
UnifiedSKG | UnifiedSKG-main/models/unified/prefixtuning.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import torch
from torch import nn
from transformers import AutoTokenizer
from .base import PushToHubFriendlyModel
from ..prompt.modeling_auto import AutoModelForSeq2SeqLM
class Model(PushToHubFriendlyModel):
def __init__(self, args):
super().__init__()
... | 12,841 | 41.806667 | 148 | py |
UnifiedSKG | UnifiedSKG-main/models/unified/finetune.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from torch import nn
from .base import PushToHubFriendlyModel
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
class Model(PushToHubFriendlyModel):
def __init__(self, args):
super().__init__()
self.args = args
# Load tokenizer an... | 1,319 | 29.697674 | 90 | py |
UnifiedSKG | UnifiedSKG-main/models/unified/adaptertuning.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import torch
from torch import nn
from transformers import AutoTokenizer
from .base import PushToHubFriendlyModel
from ..adapter.modeling_auto import AutoModelForSeq2SeqLM
class Model(PushToHubFriendlyModel):
def __init__(self, args):
super().__init__()
... | 12,905 | 40.632258 | 133 | py |
UnifiedSKG | UnifiedSKG-main/models/unified/combined_prefixtuning.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from collections import OrderedDict
import torch
from torch import nn
from transformers import AutoTokenizer
from .base import PushToHubFriendlyModel
from ..prompt.modeling_auto import AutoModelForSeq2SeqLM
def aggregate_prompt(
past_prompt_dict: OrderedDict, task_... | 25,838 | 39.563579 | 133 | py |
UnifiedSKG | UnifiedSKG-main/utils/dataset.py | import os
import torch
from torch.utils.data import Dataset
class TokenizedDataset(Dataset):
# TODO: A unified structure-representation.
def __init__(self, args, training_args, tokenizer, seq2seq_dataset, ):
self.args = args
self.training_args = training_args
self.tokenizer = tokenizer... | 6,393 | 50.152 | 131 | py |
UnifiedSKG | UnifiedSKG-main/utils/trainer.py | import collections
import json
import time
from typing import Any, Dict, List, Optional, Tuple, Union
from typing import NamedTuple
import datasets
import numpy as np
import torch
import transformers.trainer_seq2seq
from torch.utils.data import Dataset
from packaging import version
from torch import nn
from torch.util... | 14,806 | 40.709859 | 119 | py |
UnifiedSKG | UnifiedSKG-main/tasks/multiwoz.py | # coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/lice... | 40,787 | 41.709948 | 642 | py |
EfficientFixMatch | EfficientFixMatch-main/train.py | import argparse
import logging
import math
import os
import random
import shutil
import time
from collections import OrderedDict
import numpy as np
import torch
import torch.nn.functional as F
import torch.optim as optim
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader, RandomSampl... | 19,313 | 38.018182 | 238 | py |
EfficientFixMatch | EfficientFixMatch-main/train_subset.py | import argparse
import copy
import logging
import math
import os
import random
import shutil
import time
from collections import OrderedDict
from subset_selection.ssl import *
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim.lr_scheduler ... | 50,279 | 43.812834 | 224 | py |
EfficientFixMatch | EfficientFixMatch-main/dataset/cifar.py | import logging
import math
import numpy as np
from PIL import Image
from torchvision import datasets
from torchvision import transforms
from .randaugment import RandAugmentMC
logger = logging.getLogger(__name__)
cifar10_mean = (0.4914, 0.4822, 0.4465)
cifar10_std = (0.2471, 0.2435, 0.2616)
cifar100_mean = (0.5071, ... | 6,239 | 33.098361 | 87 | py |
EfficientFixMatch | EfficientFixMatch-main/dataset/randaugment.py | # code in this file is adpated from
# https://github.com/ildoonet/pytorch-randaugment/blob/master/RandAugment/augmentations.py
# https://github.com/google-research/fixmatch/blob/master/third_party/auto_augment/augmentations.py
# https://github.com/google-research/fixmatch/blob/master/libml/ctaugment.py
import logging
i... | 5,821 | 25.343891 | 99 | py |
EfficientFixMatch | EfficientFixMatch-main/models/resnext.py | import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
logger = logging.getLogger(__name__)
def mish(x):
"""Mish: A Self Regularized Non-Monotonic Neural Activation Function (https://arxiv.org/abs/1908.08681)"""
return x * torch.tanh(F.softplus(x))
class BatchNorm2d(nn.BatchNorm... | 7,988 | 41.494681 | 112 | py |
EfficientFixMatch | EfficientFixMatch-main/models/ema.py | from copy import deepcopy
import torch
class ModelEMA(object):
def __init__(self, args, model, decay):
self.ema = deepcopy(model)
self.ema.to(args.device)
self.ema.eval()
self.decay = decay
self.ema_has_module = hasattr(self.ema, 'module')
# Fix EMA. https://github... | 1,297 | 32.282051 | 78 | py |
EfficientFixMatch | EfficientFixMatch-main/models/wideresnet.py | import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
logger = logging.getLogger(__name__)
def mish(x):
"""Mish: A Self Regularized Non-Monotonic Neural Activation Function (https://arxiv.org/abs/1908.08681)"""
return x * torch.tanh(F.softplus(x))
class PSBatchNorm2d(nn.BatchNo... | 5,928 | 40.461538 | 119 | py |
EfficientFixMatch | EfficientFixMatch-main/subset_selection/helpers/omp_solvers.py | import numpy as np
np.seterr(all='raise')
from numpy.linalg import cond
from numpy.linalg import inv
from numpy.linalg import norm
from scipy import sparse as sp
from scipy.linalg import lstsq
from scipy.linalg import solve
from scipy.optimize import nnls
import torch
# NOTE: Textbook Primal-Dual IPM: Boyd & Vanden... | 19,634 | 31.508278 | 152 | py |
EfficientFixMatch | EfficientFixMatch-main/subset_selection/helpers/optimalWeights.py | import numpy as np
np.seterr(all='raise')
from numpy.linalg import cond
from numpy.linalg import inv
from numpy.linalg import norm
from scipy import sparse as sp
from scipy.linalg import lstsq
from scipy.linalg import solve
from scipy.optimize import nnls
import torch
def OptimalWeights(A, b, tol=1E-4, nnz=None, d... | 1,326 | 24.519231 | 83 | py |
EfficientFixMatch | EfficientFixMatch-main/subset_selection/ssl/randomstrategy.py | import numpy as np
import torch
class RandomStrategy(object):
"""
This is the Random Selection Strategy class where we select a set of random points as a datasubset
and often acts as baselines to compare other selection strategies.
Parameters
----------
trainloader: class
Loading the ... | 1,298 | 27.23913 | 102 | py |
EfficientFixMatch | EfficientFixMatch-main/subset_selection/ssl/fixedweightstrategy.py | import math
import time
import torch
import numpy as np
from .dataselectionstrategy import DataSelectionStrategy
from ..helpers import OrthogonalMP_REG_Parallel, OrthogonalMP_REG, OptimalWeights
from torch.utils.data import Subset, DataLoader
class FixedWeightStrategy(DataSelectionStrategy):
"""
Parameters
-... | 8,931 | 46.764706 | 161 | py |
EfficientFixMatch | EfficientFixMatch-main/subset_selection/ssl/glisterstrategy.py | import math
import random
import time
import torch
import torch.nn.functional as F
from .dataselectionstrategy import DataSelectionStrategy
class GLISTERStrategy(DataSelectionStrategy):
"""
Implementation of GLISTER-ONLINE Strategy from the paper :footcite:`killamsetty2020glister` for supervised learning fra... | 17,000 | 51.798137 | 280 | py |
EfficientFixMatch | EfficientFixMatch-main/subset_selection/ssl/craigstrategy.py | import apricot
import numpy as np
import torch
from scipy.sparse import csr_matrix
from .dataselectionstrategy import DataSelectionStrategy
from torch.utils.data.sampler import SubsetRandomSampler
import math
class CRAIGStrategy(DataSelectionStrategy):
"""
Implementation of CRAIG Strategy from the paper :foot... | 13,581 | 43.825083 | 215 | py |
EfficientFixMatch | EfficientFixMatch-main/subset_selection/ssl/dataselectionstrategy.py | import torch
from torch.nn.functional import cross_entropy
class DataSelectionStrategy(object):
"""
Implementation of Data Selection Strategy class which serves as base class for other
dataselectionstrategies for general learning frameworks.
Parameters
----------
trainloader: class
... | 13,937 | 48.601423 | 111 | py |
EfficientFixMatch | EfficientFixMatch-main/subset_selection/ssl/ompgradmatchstrategy.py | import math
import time
import torch
import numpy as np
from .dataselectionstrategy import DataSelectionStrategy
from ..helpers import OrthogonalMP_REG_Parallel, OrthogonalMP_REG
from torch.utils.data import Subset, DataLoader
class OMPGradMatchStrategy(DataSelectionStrategy):
"""
Implementation of OMPGradMat... | 10,151 | 49.257426 | 251 | py |
EfficientFixMatch | EfficientFixMatch-main/utils/misc.py | '''Some helper functions for PyTorch, including:
- get_mean_and_std: calculate the mean and std value of dataset.
'''
import logging
import torch
logger = logging.getLogger(__name__)
__all__ = ['get_mean_and_std', 'accuracy', 'AverageMeter']
def get_mean_and_std(dataset):
'''Compute the mean and std value ... | 1,726 | 25.569231 | 95 | py |
fast-rcnn | fast-rcnn-master/tools/compress_net.py | #!/usr/bin/env python
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Compress a Fast R-CNN network using truncated... | 3,804 | 29.685484 | 81 | py |
fast-rcnn | fast-rcnn-master/tools/test_net.py | #!/usr/bin/env python
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Test a Fast R-CNN network on an image databas... | 2,706 | 32.012195 | 77 | py |
fast-rcnn | fast-rcnn-master/tools/_init_paths.py | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Set up paths for Fast R-CNN."""
import os.path as osp
import sys
d... | 637 | 23.538462 | 66 | py |
fast-rcnn | fast-rcnn-master/tools/demo.py | #!/usr/bin/env python
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""
Demo script showing detections in sample ima... | 5,446 | 33.694268 | 80 | py |
fast-rcnn | fast-rcnn-master/tools/train_svms.py | #!/usr/bin/env python
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""
Train post-hoc SVMs using the algorithm and ... | 13,480 | 37.081921 | 80 | py |
fast-rcnn | fast-rcnn-master/tools/train_net.py | #!/usr/bin/env python
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Train a Fast R-CNN network on a region of int... | 3,134 | 32.709677 | 78 | py |
fast-rcnn | fast-rcnn-master/lib/roi_data_layer/layer.py | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""The data layer used during training to train a Fast R-CNN network.
... | 5,930 | 35.838509 | 75 | py |
fast-rcnn | fast-rcnn-master/lib/roi_data_layer/minibatch.py | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Compute minibatch blobs for training a Fast R-CNN network."""
impor... | 7,337 | 38.664865 | 79 | py |
fast-rcnn | fast-rcnn-master/lib/fast_rcnn/test.py | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Test a Fast R-CNN network on an imdb (image database)."""
from fast... | 11,975 | 35.512195 | 78 | py |
fast-rcnn | fast-rcnn-master/lib/fast_rcnn/config.py | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Fast R-CNN config system.
This file specifies default config option... | 6,177 | 29.584158 | 80 | py |
fast-rcnn | fast-rcnn-master/lib/fast_rcnn/train.py | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Train a Fast R-CNN network."""
import caffe
from fast_rcnn.config i... | 4,449 | 34.6 | 79 | py |
deep-reinforcement-learning | deep-reinforcement-learning-master/finance/model.py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
def hidden_init(layer):
fan_in = layer.weight.data.size()[0]
lim = 1. / np.sqrt(fan_in)
return (-lim, lim)
class Actor(nn.Module):
"""Actor (Policy) Model."""
def __init__(self, state_size, action_size, seed, f... | 2,689 | 34.866667 | 89 | py |
deep-reinforcement-learning | deep-reinforcement-learning-master/finance/ddpg_agent.py | import numpy as np
import random
import copy
from collections import namedtuple, deque
from model import Actor, Critic
import torch
import torch.nn.functional as F
import torch.optim as optim
BUFFER_SIZE = int(1e4) # replay buffer size
BATCH_SIZE = 128 # minibatch size
GAMMA = 0.99 # discount fact... | 7,832 | 40.010471 | 127 | py |
deep-reinforcement-learning | deep-reinforcement-learning-master/ddpg-pendulum/model.py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
def hidden_init(layer):
fan_in = layer.weight.data.size()[0]
lim = 1. / np.sqrt(fan_in)
return (-lim, lim)
class Actor(nn.Module):
"""Actor (Policy) Model."""
def __init__(self, state_size, action_size, seed, f... | 2,693 | 34.92 | 89 | py |
deep-reinforcement-learning | deep-reinforcement-learning-master/ddpg-pendulum/ddpg_agent.py | import numpy as np
import random
import copy
from collections import namedtuple, deque
from model import Actor, Critic
import torch
import torch.nn.functional as F
import torch.optim as optim
BUFFER_SIZE = int(1e5) # replay buffer size
BATCH_SIZE = 128 # minibatch size
GAMMA = 0.99 # discount fact... | 7,794 | 40.243386 | 127 | py |
deep-reinforcement-learning | deep-reinforcement-learning-master/dqn/solution/model.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class QNetwork(nn.Module):
"""Actor (Policy) Model."""
def __init__(self, state_size, action_size, seed, fc1_units=64, fc2_units=64):
"""Initialize parameters and build model.
Params
======
state_size (int):... | 1,015 | 34.034483 | 82 | py |
deep-reinforcement-learning | deep-reinforcement-learning-master/dqn/solution/dqn_agent.py | import numpy as np
import random
from collections import namedtuple, deque
from model import QNetwork
import torch
import torch.nn.functional as F
import torch.optim as optim
BUFFER_SIZE = int(1e5) # replay buffer size
BATCH_SIZE = 64 # minibatch size
GAMMA = 0.99 # discount factor
TAU = 1e-3 ... | 6,277 | 38.734177 | 127 | py |
deep-reinforcement-learning | deep-reinforcement-learning-master/dqn/exercise/model.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class QNetwork(nn.Module):
"""Actor (Policy) Model."""
def __init__(self, state_size, action_size, seed):
"""Initialize parameters and build model.
Params
======
state_size (int): Dimension of each state
... | 647 | 27.173913 | 63 | py |
deep-reinforcement-learning | deep-reinforcement-learning-master/dqn/exercise/dqn_agent.py | import numpy as np
import random
from collections import namedtuple, deque
from model import QNetwork
import torch
import torch.nn.functional as F
import torch.optim as optim
BUFFER_SIZE = int(1e5) # replay buffer size
BATCH_SIZE = 64 # minibatch size
GAMMA = 0.99 # discount factor
TAU = 1e-3 ... | 5,768 | 38.513699 | 127 | py |
deep-reinforcement-learning | deep-reinforcement-learning-master/ddpg-bipedal/model.py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
def hidden_init(layer):
fan_in = layer.weight.data.size()[0]
lim = 1. / np.sqrt(fan_in)
return (-lim, lim)
class Actor(nn.Module):
"""Actor (Policy) Model."""
def __init__(self, state_size, action_size, seed, f... | 2,708 | 35.12 | 100 | py |
deep-reinforcement-learning | deep-reinforcement-learning-master/ddpg-bipedal/ddpg_agent.py | import numpy as np
import random
import copy
from collections import namedtuple, deque
from model import Actor, Critic
import torch
import torch.nn.functional as F
import torch.optim as optim
BUFFER_SIZE = int(1e6) # replay buffer size
BATCH_SIZE = 128 # minibatch size
GAMMA = 0.99 # discount fact... | 7,794 | 40.243386 | 127 | py |
marginalTailAdaptiveFlow | marginalTailAdaptiveFlow-master/real_world_experiments/main.py | import os
import xarray as xr
import numpy as np
import wandb
import argparse
import torch
import seaborn as sns
sns.set_context('paper', font_scale=1.8)
import sys
sys.path.append('../utils')
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.... | 4,321 | 33.301587 | 110 | py |
marginalTailAdaptiveFlow | marginalTailAdaptiveFlow-master/real_world_experiments/random_statistics.py | import os
from pathlib import Path
import argparse
import torch
import seaborn as sns
import xarray as xr
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import sys
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(cu... | 13,142 | 37.769912 | 124 | py |
marginalTailAdaptiveFlow | marginalTailAdaptiveFlow-master/synthetic_experiments/main.py | ###########
# Imports #
###########
import argparse
import time
import os.path
import sys
import inspect
import numpy as np
import seaborn as sns
sns.set_context('paper', font_scale=2)
import wandb
# login to wandb:
wandb.init(anonymous="allow", project="mTAF_ICML")
from torch.nn import functional as F
currentdir = o... | 6,322 | 32.632979 | 171 | py |
marginalTailAdaptiveFlow | marginalTailAdaptiveFlow-master/synthetic_experiments/estimate_tailindices.py | import torch
import argparse
import os.path
import sys
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
from utils.flows import MAF
if torch.cuda.is_available():
torch.device("cuda")
devi... | 873 | 26.3125 | 86 | py |
marginalTailAdaptiveFlow | marginalTailAdaptiveFlow-master/synthetic_experiments/copula_baseline.py | import torch
import argparse
import time
import os.path
import sys
import inspect
import numpy as np
import copulas
from copulas.multivariate import GaussianMultivariate
from scipy.stats import kstest
import statsmodels.api as sm
from statsmodels.graphics.gofplots import qqplot_2samples
import matplotlib.pyplot as plt
... | 4,696 | 32.312057 | 148 | py |
marginalTailAdaptiveFlow | marginalTailAdaptiveFlow-master/utils/distributions.py | from nflows.distributions.base import Distribution
import torch
from nflows.utils import torchutils
from torch.distributions.studentT import StudentT
from torch.distributions.normal import Normal
from torch import nn
import numpy as np
if torch.cuda.is_available():
torch.device("cuda")
device = "cuda"
else:
... | 6,795 | 38.976471 | 176 | py |
marginalTailAdaptiveFlow | marginalTailAdaptiveFlow-master/utils/tail_permutation.py | """Implementations of permutation-like transforms."""
import torch
import numpy as np
import nflows.utils.typechecks as check
from nflows.transforms.permutations import Permutation
from nflows.transforms.linear import Linear
from nflows.transforms.lu import LULinear
from torch.nn import functional as F
from torch.nn im... | 8,371 | 38.490566 | 119 | py |
marginalTailAdaptiveFlow | marginalTailAdaptiveFlow-master/utils/flows.py | # torch
import torch
from torch import optim
from torch.nn import functional as F
from torch.nn.utils import clip_grad_norm_
from torch.utils.data import DataLoader
from torch.optim.lr_scheduler import CosineAnnealingLR
# standard modules
import os, os.path
import sys
import inspect
from pathlib import Path
# data proc... | 35,781 | 47.419486 | 243 | py |
SelfMemory | SelfMemory-main/src/boring_model.py | import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import pytorch_lightning as pl
from pytorch_lightning.strategies import DDPStrategy
class RandomDataset(torch.utils.data.Dataset):
def __init__(self) -> None:
super().__init__()
self.data = [torch.rand(512) for _ in... | 2,140 | 28.328767 | 98 | py |
SelfMemory | SelfMemory-main/src/generate_hyps.py | import json,os,time,argparse,warnings,time,yaml
from functools import partial
os.environ["TOKENIZERS_PARALLELISM"] = "false"
# os.environ['CUDA_VISIBLE_DEVICES']='1'
## torch
import torch
import torch.distributed as dist
## lightning
import pytorch_lightning as pl
from pytorch_lightning import LightningModule
from pyto... | 8,564 | 39.211268 | 131 | py |
SelfMemory | SelfMemory-main/src/train_generator.py | import json,os,time,argparse,warnings,time,yaml,shutil
from functools import partial
os.environ["TOKENIZERS_PARALLELISM"] = "false"
from os import system as shell
## torch
import torch
import torch.distributed as dist
## lightning
import pytorch_lightning as pl
from pytorch_lightning import LightningModule
from pytorch... | 21,832 | 41.978346 | 212 | py |
SelfMemory | SelfMemory-main/src/utils/ddp_utils.py | def set_available_port():
import os
import random
def port_is_used(port,ip='127.0.0.1'):
"""
test whether a port is used or not
"""
import socket
s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
try:
s.connect((ip,port))
s.shut... | 4,651 | 36.516129 | 138 | py |
SelfMemory | SelfMemory-main/src/utils/optim_utils.py | # coding=utf-8
import torch
import math
from torch.optim.lr_scheduler import LambdaLR
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
"""
Create a schedule with a learning rate that decreases linearly from the initial lr set in the optimizer to 0,
after ... | 1,378 | 43.483871 | 146 | py |
SelfMemory | SelfMemory-main/src/utils/utils.py | from dataclasses import dataclass
@dataclass
class LabelSmoother:
"""copied from huggingface/transformers"""
ignore_index: int = -100
def __call__(self, logits, labels, shift_labels=False,epsilon: float = 0.1):
import torch
import torch.nn as nn
if shift_labels:
... | 12,146 | 30.632813 | 115 | py |
SelfMemory | SelfMemory-main/src/utils/metrics_utils.py | def get_rouge_score(hyps,refs):
from compare_mt.rouge.rouge_scorer import RougeScorer
assert len(hyps)==len(refs)
lens = len(hyps)
rouge_scorer = RougeScorer(['rouge1', 'rouge2', 'rougeLsum'], use_stemmer=True)
rouge1 = rouge2 = rougel = 0.0
for hyp,ref in zip(hyps,refs):
score =... | 6,998 | 34.348485 | 160 | py |
SelfMemory | SelfMemory-main/src/model/dualencoder_transformer.py | from transformers.models.marian.modeling_marian import *
from dataclasses import dataclass
@dataclass
class DualEncoderOutput(BaseModelOutput):
src_last_hidden_state:torch.Tensor=None
memory_last_hidden_state:torch.Tensor=None
def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = N... | 30,328 | 47.217806 | 150 | py |
SelfMemory | SelfMemory-main/src/model/dualencoder_pegasus.py | from transformers.models.pegasus.modeling_pegasus import *
from dataclasses import dataclass
from transformers.modeling_outputs import BaseModelOutput
def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
"""
Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_l... | 32,040 | 45.775182 | 150 | py |
SelfMemory | SelfMemory-main/src/model/dualencoder_bart.py | from transformers.models.bart.modeling_bart import *
from dataclasses import dataclass
@dataclass
class DualEncoderOutput(BaseModelOutput):
src_last_hidden_state:torch.Tensor=None
memory_last_hidden_state:torch.Tensor=None
def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None)... | 27,837 | 45.319468 | 119 | py |
sed_eval | sed_eval-master/documentation/source/conf.py | # -*- coding: utf-8 -*-
#
# sed_eval documentation build configuration file, created by
# sphinx-quickstart on Thu May 8 15:55:45 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# ... | 10,179 | 28.766082 | 84 | py |
ups | ups-main/train-cifar.py | import argparse
import logging
import math
import os
import random
import shutil
import time
from copy import deepcopy
from collections import OrderedDict
import pickle
import numpy as np
from re import search
import torch
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
import torch.optim as optim
... | 13,924 | 49.270758 | 179 | py |
ups | ups-main/models/cnn13.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import sys, os
import random
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
## code for CNN13 from https://github.com/benathi/fastswa-semi-sup/blob/master/mean_teacher/architectures.py
from torch.nn.utils import weight_... | 3,555 | 29.655172 | 108 | py |
ups | ups-main/models/shakeshake.py | # Copyright (c) 2018, Curious AI Ltd. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View... | 11,443 | 35.44586 | 107 | py |
ups | ups-main/models/wideresnet.py | import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
logger = logging.getLogger(__name__)
def mish(x):
"""Mish: A Self Regularized Non-Monotonic Neural Activation Function (https://arxiv.org/abs/1908.08681)"""
return x * torch.tanh(F.softplus(x))
class PSBatchNorm2d(nn.BatchNo... | 5,517 | 40.488722 | 119 | py |
ups | ups-main/utils/pseudo_labeling_util.py | import random
import time
import pickle
import numpy as np
import torch
import torch.nn.functional as F
from tqdm import tqdm
from .misc import AverageMeter, accuracy
from .utils import enable_dropout
def pseudo_labeling(args, data_loader, model, itr):
batch_time = AverageMeter()
data_time = AverageMeter()
... | 6,957 | 42.761006 | 187 | py |
ups | ups-main/utils/evaluate.py | import random
import time
import pickle
import numpy as np
import torch
import torch.nn.functional as F
from tqdm import tqdm
from .misc import AverageMeter, accuracy
def test(args, test_loader, model):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter(... | 1,701 | 32.372549 | 176 | py |
ups | ups-main/utils/misc.py | '''Some helper functions for PyTorch, including:
- get_mean_and_std: calculate the mean and std value of dataset.
'''
import logging
import torch
logger = logging.getLogger(__name__)
__all__ = ['get_mean_and_std', 'accuracy', 'AverageMeter']
def get_mean_and_std(dataset):
'''Compute the mean and std value ... | 1,810 | 25.632353 | 95 | py |
ups | ups-main/utils/utils.py | import math
import os
import random
import shutil
import numpy as np
import torch
import sys
sys.path.append('../..')
from torch.optim.lr_scheduler import LambdaLR
def save_checkpoint(state, is_best, checkpoint, itr):
filename=f'checkpoint_{itr}.pth.tar'
filepath = os.path.join(checkpoint, filename)
torch.... | 2,154 | 32.671875 | 92 | py |
ups | ups-main/utils/train_util.py | import random
import time
import pickle
import numpy as np
import torch
import torch.nn.functional as F
from tqdm import tqdm
from .misc import AverageMeter, accuracy
def train_regular(args, lbl_loader, nl_loader, model, optimizer, scheduler, epoch, itr):
batch_time = AverageMeter()
data_time = AverageMeter()... | 4,363 | 34.193548 | 199 | py |
ups | ups-main/data/augmentations.py | # code in this file is adpated from rpmcruz/autoaugment
# https://github.com/rpmcruz/autoaugment/blob/master/transformations.py
import random
import PIL, PIL.ImageOps, PIL.ImageEnhance, PIL.ImageDraw
import numpy as np
import torch
from PIL import Image
def ShearX(img, v): # [-0.3, 0.3]
assert -0.3 <= v <= 0.3
... | 7,726 | 25.372014 | 134 | py |
ups | ups-main/data/cifar.py | import numpy as np
from PIL import Image
from torchvision import datasets
from torchvision import transforms
from .augmentations import RandAugment, CutoutRandom
import pickle
import os
def get_cifar10(root='data/datasets', n_lbl=4000, ssl_idx=None, pseudo_lbl=None, itr=0, split_txt=''):
os.makedirs(root, exist_o... | 10,202 | 37.070896 | 104 | py |
DiffKD | DiffKD-main/example.py | import torch
import torch.nn.functional as F
from diffkd import DiffKD
def main():
# init DiffKD loss
diffkd = DiffKD(student_channels=128, teacher_channels=512,
kernel_size=3, use_ae=True, ae_channels=256)
print(diffkd)
# get the student feature and teacher feature
student_fe... | 1,651 | 35.711111 | 115 | py |
DiffKD | DiffKD-main/diffkd_modules.py | import torch
import torch.nn as nn
class NoiseAdapter(nn.Module):
def __init__(self, channels, kernel_size=3):
super().__init__()
if kernel_size == 3:
self.feat = nn.Sequential(
Bottleneck(channels, channels, reduction=8),
nn.AdaptiveAvgPool2d(1)
... | 5,043 | 31.96732 | 114 | py |
DiffKD | DiffKD-main/scheduling_ddim.py | # modified from https://raw.githubusercontent.com/huggingface/diffusers/main/src/diffusers/schedulers/scheduling_ddim.py
# Copyright 2022 Stanford University Team and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in com... | 20,745 | 44.495614 | 120 | py |
DiffKD | DiffKD-main/diffkd.py | import torch
from torch import nn
import torch.nn.functional as F
from diffkd_modules import DiffusionModel, NoiseAdapter, AutoEncoder, DDIMPipeline
from scheduling_ddim import DDIMScheduler
class DiffKD(nn.Module):
def __init__(
self,
student_channels,
teacher_channels,
... | 3,330 | 39.13253 | 122 | py |
clusterjob | clusterjob-master/example/Python/pytorch/mnist/mnist.py | # DCNN Tranining Example
# Data: MNIST
# Author: Hatef Monajemi (monajemi AT stanford DOT edu)
# Date: Aug 2017
# Stanford, CA
import numpy, os.path
#import matplotlib.pyplot as plt
import torch
import torchvision
import torchvision.transforms as transforms
from torch.autograd import Variable
use_gpu = torch.cuda.i... | 5,007 | 27.947977 | 109 | py |
python-fsps | python-fsps-master/docs/conf.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (division, print_function, absolute_import,
unicode_literals)
import os
import sys
# Patch the path to get the local version.
d = os.path.dirname
sys.path.insert(0, d(d(os.path.abspath(__file__))))
extensions = ["sphinx.ext... | 1,172 | 26.27907 | 71 | py |
varz | varz-master/tests/util.py | import lab as B
import numpy as np
import pytest
import tensorflow as tf
import torch
import wbml.out
from numpy.testing import assert_allclose
from plum import Dispatcher
from varz import Vars
from wbml import out as out
__all__ = [
"Value",
"approx",
# Numerical checks:
"assert_lower_triangular",
... | 2,610 | 19.084615 | 63 | py |
varz | varz-master/tests/__init__.py | import os
import sys
# Add package to path.
file_dir = os.path.dirname(__file__)
sys.path.insert(0, os.path.abspath(os.path.join(file_dir, "..")))
# Load LAB extensions.
# noinspection PyUnresolvedReferences
import lab.torch
# noinspection PyUnresolvedReferences
import lab.tensorflow
# Load TensorFlow extension.
# ... | 379 | 20.111111 | 65 | py |
varz | varz-master/tests/test_minimise.py | import jax.numpy as jnp
import lab as B
import numpy as np
import pytest
import tensorflow as tf
import torch
import varz.autograd
import varz.jax
import varz.tensorflow
import varz.torch
import wbml.out as out
from plum import isinstance
from varz import Vars
from varz.minimise import _convert_and_validate_names
from... | 7,981 | 27.71223 | 87 | py |
varz | varz-master/tests/test_vars.py | import lab as B
import numpy as np
import pytest
import torch
from varz import Vars
# noinspection PyUnresolvedReferences
from .util import (
KV,
approx,
assert_lower_triangular,
assert_orthogonal,
assert_positive_definite,
dtype,
vs,
vs_source,
)
def test_get_latent_vars():
vs = ... | 14,778 | 26.727955 | 82 | py |
varz | varz-master/docs/conf.py | # -*- coding: utf-8 -*-
#
# varz documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a ... | 5,257 | 29.748538 | 79 | py |
varz | varz-master/varz/minimise.py | import importlib
import traceback
from functools import wraps
from typing import Tuple
import lab as B
import numpy as np
import wbml.out as out
from plum import Dispatcher, convert, isinstance
from scipy.optimize import fmin_l_bfgs_b
from .adam import ADAM
__all__ = ["minimise_l_bfgs_b", "minimise_adam"]
_dispatch... | 11,373 | 30.682451 | 88 | py |
varz | varz-master/varz/vars.py | import logging
import warnings
from abc import ABCMeta, abstractmethod
from collections import OrderedDict
from functools import reduce
from operator import mul
import lab as B
import numpy as np
import wbml.out
from lab.shape import Shape
from plum import Dispatcher, isinstance
from .util import lazy_jnp as jnp
from... | 27,085 | 30.277136 | 88 | py |
varz | varz-master/varz/util.py | import importlib
import logging
import re
from itertools import product
from typing import Union
from functools import reduce
from operator import mul
import lab as B
from plum import Dispatcher
__all__ = ["lazy_tf", "lazy_torch", "lazy_jnp", "pack", "unpack", "match"]
_dispatch = Dispatcher()
log = logging.getLogg... | 2,090 | 21.978022 | 77 | py |
varz | varz-master/varz/torch/__init__.py | # noinspection PyUnresolvedReferences
import lab.torch
from .minimise import *
# noinspection PyUnresolvedReferences
from .. import *
| 136 | 16.125 | 37 | py |
varz | varz-master/varz/jax/minimise.py | import logging
import lab.jax as B
import numpy as np
from jax import value_and_grad
from plum import convert
from ..minimise import make_l_bfgs_b, make_adam, exception
__all__ = ["minimise_l_bfgs_b", "minimise_adam"]
log = logging.getLogger(__name__)
def _wrap_f(vs, names, f, jit, _convert):
# Differentiable... | 1,524 | 26.232143 | 88 | py |
varz | varz-master/varz/jax/__init__.py | import jax
# noinspection PyUnresolvedReferences
import lab.jax
from .minimise import *
# noinspection PyUnresolvedReferences
from .. import *
# We will need `float64`s
jax.config.update("jax_enable_x64", True)
| 215 | 15.615385 | 41 | py |
covid-chestxray-dataset | covid-chestxray-dataset-master/scripts/test_dataloader.py | import torch
import torchvision
import torchxrayvision as xrv
from tqdm import tqdm
import sys
# print stats
for views in [["PA","AP"],["AP Supine"]]:
print(xrv.datasets.COVID19_Dataset(views=views,
imgpath="../images",
csvpath="../m... | 864 | 29.892857 | 75 | py |
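
For anyone sanity-checking individual rows, the sketch below shows one plausible way the derived columns (`file_length`, `avg_line_length`, `max_line_length`) can be recomputed from the `code` field. The example row and the exact treatment of newline characters are assumptions for illustration only; they are not taken from the export above.

```python
# Minimal sketch: recompute the derived columns of one row from its `code` field.
# The row contents below are hypothetical; only the column names follow the header.
row = {
    "repo": "example-repo",
    "file": "example-repo-main/module/util.py",
    "code": "import math\n\ndef area(r):\n    return math.pi * r ** 2\n",
    "extension_type": "py",
}

lines = row["code"].splitlines()
file_length = len(row["code"])                        # total characters in the file
# Assumed convention: mean characters per line; whether newline characters are
# counted in the original export is not specified here.
avg_line_length = file_length / max(len(lines), 1)
max_line_length = max((len(line) for line in lines), default=0)

print(file_length, round(avg_line_length, 5), max_line_length)
```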