| repo (string, 1-99 chars) | file (string, 13-215 chars) | code (string, 12-59.2M chars) | file_length (int64, 12-59.2M) | avg_line_length (float64, 3.82-1.48M) | max_line_length (int64, 12-2.51M) | extension_type (string, 1 class) |
|---|---|---|---|---|---|---|
numpy | numpy-main/numpy/ma/tests/test_core.py | # pylint: disable-msg=W0400,W0511,W0611,W0612,W0614,R0201,E1102
"""Tests suite for MaskedArray & subclassing.
:author: Pierre Gerard-Marchant
:contact: pierregm_at_uga_dot_edu
"""
__author__ = "Pierre GF Gerard-Marchant"
import sys
import warnings
import copy
import operator
import itertools
import textwrap
import pi... | 213,657 | 36.795507 | 102 | py |
numpy | numpy-main/doc/source/conf.py | import os
import re
import sys
import importlib
# Minimum version, enforced by sphinx
needs_sphinx = '4.3'
# This is a nasty hack to use platform-agnostic names for types in the
# documentation.
# must be kept alive to hold the patched names
_name_cache = {}
def replace_scalar_type_names():
""" Rename numpy ty... | 16,868 | 30.649156 | 90 | py |
NDSS21-Model-Poisoning | NDSS21-Model-Poisoning-main/cifar10/sgd.py | import math
import torch
from torch.optim import Optimizer
class SGD(Optimizer):
r"""Implements stochastic gradient descent (optionally with momentum).
Nesterov momentum is based on the formula from
`On the importance of initialization and momentum in deep learning`__.
Args:
params (iterable... | 4,059 | 35.576577 | 88 | py |
NDSS21-Model-Poisoning | NDSS21-Model-Poisoning-main/cifar10/cifar10_normal_train.py | from cifar10_models import *
def train(train_data, labels, model, criterion, optimizer, use_cuda, num_batchs=999999, debug_='MEDIUM', batch_size=16):
# switch to train mode
model.train()
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
t... | 3,896 | 30.682927 | 161 | py |
NDSS21-Model-Poisoning | NDSS21-Model-Poisoning-main/cifar10/adam.py | import math
import torch
from torch.optim import Optimizer
class Adam(Optimizer):
"""Implements Adam algorithm.
It has been proposed in `Adam: A Method for Stochastic Optimization`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
... | 3,998 | 42.467391 | 116 | py |
NDSS21-Model-Poisoning | NDSS21-Model-Poisoning-main/cifar10/cifar10_models.py | from __future__ import print_function
import argparse, os, sys, csv, shutil, time, random, operator, pickle, ast
import numpy as np
import pandas as pd
import torch.nn.functional as F
import torch
import pickle
import torch.nn as nn
import torch.nn.parallel
import torch.optim as optim
import models.cifar as models
sys... | 6,690 | 35.167568 | 97 | py |
NDSS21-Model-Poisoning | NDSS21-Model-Poisoning-main/cifar10/cifar10_util.py | from cifar10_models import *
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
m.weight.data.fill_(0)
elif classname.find('BatchNorm') != -1:
m.weight.data.fill_(0)
m.bias.data.fill_(0)
def save_checkpoint_user_(user_num, state, is_best, checkpo... | 5,029 | 36.259259 | 135 | py |
NDSS21-Model-Poisoning | NDSS21-Model-Poisoning-main/cifar10/models/imagenet/resnext.py | from __future__ import division
"""
Creates a ResNeXt Model as defined in:
Xie, S., Girshick, R., Dollar, P., Tu, Z., & He, K. (2016).
Aggregated residual transformations for deep neural networks.
arXiv preprint arXiv:1611.05431.
import from https://github.com/facebookresearch/ResNeXt/blob/master/models/resnext.lua
... | 5,698 | 31.752874 | 105 | py |
NDSS21-Model-Poisoning | NDSS21-Model-Poisoning-main/cifar10/models/cifar/preresnet.py | from __future__ import absolute_import
'''Resnet for cifar dataset.
Ported form
https://github.com/facebook/fb.resnet.torch
and
https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
(c) YANG, Wei
'''
import torch.nn as nn
import math
__all__ = ['preresnet']
def conv3x3(in_planes, out_planes, st... | 4,624 | 28.08805 | 77 | py |
NDSS21-Model-Poisoning | NDSS21-Model-Poisoning-main/cifar10/models/cifar/resnet.py | from __future__ import absolute_import
'''Resnet for cifar dataset.
Ported form
https://github.com/facebook/fb.resnet.torch
and
https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
(c) YANG, Wei
'''
import torch.nn as nn
import math
__all__ = ['resnet']
def conv3x3(in_planes, out_planes, strid... | 4,662 | 28.14375 | 77 | py |
NDSS21-Model-Poisoning | NDSS21-Model-Poisoning-main/cifar10/models/cifar/vgg.py | '''VGG for CIFAR10. FC layers are removed.
(c) YANG, Wei
'''
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
import math
__all__ = ['VGG', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn','vgg19_bn', 'vgg19',]
model_urls = {
'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30... | 4,070 | 28.933824 | 113 | py |
NDSS21-Model-Poisoning | NDSS21-Model-Poisoning-main/cifar10/models/cifar/densenet.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
__all__ = ['densenet']
from torch.autograd import Variable
class Bottleneck(nn.Module):
def __init__(self, inplanes, expansion=4, growthRate=12, dropRate=0):
super(Bottleneck, self).__init__()
planes = expansion * gr... | 4,724 | 30.711409 | 99 | py |
NDSS21-Model-Poisoning | NDSS21-Model-Poisoning-main/cifar10/models/cifar/resnext.py | from __future__ import division
"""
Creates a ResNeXt Model as defined in:
Xie, S., Girshick, R., Dollar, P., Tu, Z., & He, K. (2016).
Aggregated residual transformations for deep neural networks.
arXiv preprint arXiv:1611.05431.
import from https://github.com/prlz77/ResNeXt.pytorch/blob/master/models/model.py
"""
i... | 5,597 | 43.428571 | 144 | py |
NDSS21-Model-Poisoning | NDSS21-Model-Poisoning-main/cifar10/models/cifar/__init__.py | from __future__ import absolute_import
"""The models subpackage contains definitions for the following model for CIFAR10/CIFAR100
architectures:
- `AlexNet`_
- `VGG`_
- `ResNet`_
- `SqueezeNet`_
- `DenseNet`_
You can construct a model with random weights by calling its constructor:
.. code:: python
import... | 2,270 | 30.541667 | 90 | py |
NDSS21-Model-Poisoning | NDSS21-Model-Poisoning-main/cifar10/models/cifar/convnet.py | from __future__ import absolute_import
'''
Simple convnet for cifar dataset.
Ported form https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html
'''
import torch.nn as nn
import math
__all__ = ['convnet']
class Net(nn.Module):
def __init__(self,n_classes=10):
super(Net, self).__init__()
... | 922 | 22.666667 | 78 | py |
NDSS21-Model-Poisoning | NDSS21-Model-Poisoning-main/cifar10/models/cifar/alexnet.py | '''AlexNet for CIFAR10. FC layers are removed. Paddings are adjusted.
Without BN, the start learning rate should be 0.01
(c) YANG, Wei
'''
import torch.nn as nn
__all__ = ['alexnet']
class AlexNet(nn.Module):
def __init__(self, num_classes=10):
super(AlexNet, self).__init__()
self.features = n... | 1,359 | 29.222222 | 69 | py |
NDSS21-Model-Poisoning | NDSS21-Model-Poisoning-main/cifar10/models/cifar/wrn.py | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
__all__ = ['wrn']
class BasicBlock(nn.Module):
def __init__(self, in_planes, out_planes, stride, dropRate=0.0):
super(BasicBlock, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.relu1 = nn.ReLU(inplac... | 3,896 | 40.457447 | 116 | py |
NDSS21-Model-Poisoning | NDSS21-Model-Poisoning-main/utils/resnet.py | '''ResNet in PyTorch.
For Pre-activation ResNet, see 'preact_resnet.py'.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class... | 4,056 | 32.808333 | 102 | py |
NDSS21-Model-Poisoning | NDSS21-Model-Poisoning-main/utils/misc.py | '''Some helper functions for PyTorch, including:
- get_mean_and_std: calculate the mean and std value of dataset.
- msr_init: net parameter initialization.
- progress_bar: progress bar mimic xlua.progress.
'''
import errno
import os
import sys
import time
import math
import torch.nn as nn
import torch.nn.i... | 2,206 | 28.039474 | 110 | py |
NDSS21-Model-Poisoning | NDSS21-Model-Poisoning-main/utils/logger.py | # A simple torch style logger
# (C) Wei YANG 2017
from __future__ import absolute_import
import matplotlib.pyplot as plt
import os
import sys
import numpy as np
__all__ = ['Logger', 'LoggerMonitor', 'savefig']
def savefig(fname, dpi=None):
dpi = 150 if dpi == None else dpi
plt.savefig(fname, dpi=dpi)
def... | 4,398 | 33.637795 | 100 | py |
NDSS21-Model-Poisoning | NDSS21-Model-Poisoning-main/utils/visualize.py | import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
import numpy as np
from misc import *
__all__ = ['make_image', 'show_batch', 'show_mask', 'show_mask_single']
# functions to show an image
def make_image(img, mean=(0,0,0), std=(1,1,1))... | 3,794 | 33.5 | 95 | py |
NDSS21-Model-Poisoning | NDSS21-Model-Poisoning-main/femnist/femnist_models.py | from __future__ import print_function
import argparse, os, sys, csv, shutil, time, random, operator, pickle, ast, json
import numpy as np
import pandas as pd
import torch.nn.functional as F
import torch
import pickle
import torch.nn as nn
import torch.nn.parallel
import torch.optim as optim
sys.path.insert(0, './../ut... | 1,602 | 28.685185 | 80 | py |
NDSS21-Model-Poisoning | NDSS21-Model-Poisoning-main/femnist/sgd.py | import math
import torch
from torch.optim import Optimizer
class SGD(Optimizer):
r"""Implements stochastic gradient descent (optionally with momentum).
Nesterov momentum is based on the formula from
`On the importance of initialization and momentum in deep learning`__.
Args:
params (iterable... | 4,059 | 35.576577 | 88 | py |
NDSS21-Model-Poisoning | NDSS21-Model-Poisoning-main/femnist/adam.py | import math
import torch
from torch.optim import Optimizer
class Adam(Optimizer):
"""Implements Adam algorithm.
It has been proposed in `Adam: A Method for Stochastic Optimization`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
... | 3,998 | 42.467391 | 116 | py |
NDSS21-Model-Poisoning | NDSS21-Model-Poisoning-main/femnist/femnist_normal_train.py | from femnist_models import *
def train(train_data, labels, model, criterion, optimizer, use_cuda, num_batchs=999999, debug_='MEDIUM', batch_size=16):
# switch to train mode
model.train()
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
t... | 3,896 | 30.682927 | 161 | py |
NDSS21-Model-Poisoning | NDSS21-Model-Poisoning-main/femnist/femnist_util.py | from femnist_models import *
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
torch.nn.init.xavier_uniform_(m.weight)
elif classname.find('Linear') != -1:
torch.nn.init.xavier_uniform_(m.weight)
elif classname.find('BatchNorm') != -1:
m.weig... | 3,737 | 35.647059 | 135 | py |
MAGNN | MAGNN-master/run_DBLP.py | import time
import argparse
import torch
import torch.nn.functional as F
import numpy as np
from utils.pytorchtools import EarlyStopping
from utils.data import load_DBLP_data
from utils.tools import index_generator, evaluate_results_nc, parse_minibatch
from model import MAGNN_nc_mb
# Params
out_dim = 4
dropout_rate ... | 10,569 | 50.813725 | 139 | py |
MAGNN | MAGNN-master/run_IMDB.py | import time
import argparse
import torch.nn.functional as F
import torch.sparse
import numpy as np
import dgl
from utils.pytorchtools import EarlyStopping
from utils.data import load_IMDB_data
from utils.tools import evaluate_results_nc
from model import MAGNN_nc
# Params
out_dim = 3
dropout_rate = 0.5
lr = 0.005
we... | 8,734 | 47.259669 | 133 | py |
MAGNN | MAGNN-master/run_LastFM.py | import time
import argparse
import torch
import torch.nn.functional as F
import numpy as np
from sklearn.metrics import roc_auc_score, average_precision_score
from utils.pytorchtools import EarlyStopping
from utils.data import load_LastFM_data
from utils.tools import index_generator, parse_minibatch_LastFM
from model... | 13,301 | 57.087336 | 145 | py |
MAGNN | MAGNN-master/utils/pytorchtools.py | import numpy as np
import torch
class EarlyStopping:
"""Early stops the training if validation loss doesn't improve after a given patience."""
def __init__(self, patience, verbose=False, delta=0, save_path='checkpoint.pt'):
"""
Args:
patience (int): How long to wait after last time... | 1,824 | 36.244898 | 111 | py |
MAGNN | MAGNN-master/utils/tools.py | import torch
import dgl
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import f1_score, normalized_mutual_info_score, adjusted_rand_score
from sklearn.cluster import KMeans
from sklearn.svm import LinearSVC
def idx_to_one_hot(idx_arr):
one_hot = np.zeros((idx_arr.shap... | 11,404 | 46.128099 | 174 | py |
MAGNN | MAGNN-master/model/MAGNN_lp.py | import torch
import torch.nn as nn
import numpy as np
from model.base_MAGNN import MAGNN_ctr_ntype_specific
# for link prediction task
class MAGNN_lp_layer(nn.Module):
def __init__(self,
num_metapaths_list,
num_edge_type,
etypes_lists,
in_dim,
... | 5,609 | 41.824427 | 115 | py |
MAGNN | MAGNN-master/model/MAGNN_nc_mb.py | import torch
import torch.nn as nn
import numpy as np
from model.base_MAGNN import MAGNN_ctr_ntype_specific
# support for mini-batched forward
# only support one layer for one ctr_ntype
class MAGNN_nc_mb_layer(nn.Module):
def __init__(self,
num_metapaths,
num_edge_type,
... | 4,483 | 38.333333 | 119 | py |
MAGNN | MAGNN-master/model/MAGNN_nc.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from model.base_MAGNN import MAGNN_ctr_ntype_specific
fc_switch = False
# multi-layer support
class MAGNN_nc_layer(nn.Module):
def __init__(self,
num_metapaths_list,
num_edge_type,
... | 5,888 | 41.985401 | 148 | py |
MAGNN | MAGNN-master/model/base_MAGNN.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl.function as fn
from dgl.nn.pytorch import edge_softmax
class MAGNN_metapath_specific(nn.Module):
def __init__(self,
etypes,
out_dim,
num_heads,
rnn_type='gru',
... | 11,914 | 46.66 | 168 | py |
DataSelectionMaps | DataSelectionMaps-master/src/addexperiments.py | import math
import random
import numpy as np
import tensorflow as tf
from data import Dataset
from prediction import train_model, test_model
from prediction import load_encoder_and_predictor_weights
import activelearning
def test_sequence_importance_AL(
HYPER,
models,
raw_data,
training_data,
da... | 14,310 | 26.001887 | 89 | py |
DataSelectionMaps | DataSelectionMaps-master/src/prediction.py | import math
import os
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from sklearn.ensemble import RandomForestRegressor
class EncodersAndPredictor:
""" Keeps prediction and encoding models together. """
def __init__(
self,
X_t_encoder,
X_s1_encoder,
... | 39,102 | 30.688006 | 88 | py |
DataSelectionMaps | DataSelectionMaps-master/src/activelearning.py | import math
import timeit
import time
import random
import numpy as np
import tensorflow as tf
import scipy
from sklearn.preprocessing import OrdinalEncoder
from data import Dataset
from prediction import train_model, test_model
from prediction import load_encoder_and_predictor_weights
from prediction import initiali... | 33,121 | 29.95514 | 87 | py |
DataSelectionMaps | DataSelectionMaps-master/src/data.py | import datetime
import math
import os
import random
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
from PIL import Image
from skimage.transform import rescale
from sklearn import preprocessing
from sklearn.preprocessing import OneHotEncoder, OrdinalEncoder
class RawDat... | 48,864 | 30.771782 | 92 | py |
DataSelectionMaps | DataSelectionMaps-master/src/vis_addresults.py | import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib.lines import Line2D
import tensorflow as tf
class HyperParameterAdditionalVisualizing:
""" Keeps hyper parameters together for visualizing results
"""
SAVE_RESULTS = True
... | 44,983 | 43.894212 | 141 | py |
DataSelectionMaps | DataSelectionMaps-master/src/hyperparameters.py | from sklearn.cluster import KMeans, MiniBatchKMeans
from sklearn.metrics.pairwise import rbf_kernel, laplacian_kernel, cosine_similarity
import tensorflow as tf
import time
class HyperParameter:
""" Keeps hyper parameters together for four categories:
1. active learning algorithm
2. hypothesis and predic... | 19,606 | 36.275665 | 86 | py |
constopt-pytorch | constopt-pytorch-master/setup.py | from distutils.core import setup
import io
import setuptools
CLASSIFIERS = """\
Development Status :: 2 - Pre-Alpha
Intended Audience :: Science/Research
Intended Audience :: Developers
License :: OSI Approved
Programming Language :: Python
Programming Language :: Python :: 3
Topic :: Software Development
Operating Sy... | 866 | 27.9 | 69 | py |
constopt-pytorch | constopt-pytorch-master/examples/training_MNIST_with_FW.py | """Trains a LeNet5 model on MNIST using constraints on the weights.
"""
from tqdm import tqdm
import numpy as np
import torch
from torch import nn
from easydict import EasyDict
from advertorch.test_utils import LeNet5
from advertorch_examples.utils import get_mnist_train_loader
from advertorch_examples.utils import ... | 2,625 | 28.505618 | 91 | py |
constopt-pytorch | constopt-pytorch-master/examples/optimizer_dynamics.py | """Sets up simple 2-d problems on Linf balls to see dynamics of different constrained optimization algorithms."""
import matplotlib.pyplot as plt
import numpy as np
import torch
from constopt.constraints import LinfBall
from constopt.optim import PGD, PGDMadry, FrankWolfe, MomentumFrankWolfe
torch.random.manual_seed(... | 2,806 | 31.264368 | 113 | py |
constopt-pytorch | constopt-pytorch-master/examples/adversarial_robustness/attack_benchmark.py | from functools import partial
import torch
from tqdm import tqdm
import constopt as cpt
from constopt.data import load_cifar10
from robustbench.utils import load_model
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
batch_size = 200
n_examples = 10000
loader = load_cifar10(batch_size=batch_s... | 2,396 | 26.238636 | 73 | py |
constopt-pytorch | constopt-pytorch-master/examples/adversarial_robustness/attacking_robust_bench.py | import torch
from robustbench.data import load_cifar10
from robustbench.utils import load_model
from constopt.adversary import Adversary
from constopt.optim import PGD, PGDMadry, FrankWolfe, MomentumFrankWolfe
from constopt.constraints import LinfBall
device = torch.device("cuda" if torch.cuda.is_available() else 'c... | 1,199 | 34.294118 | 76 | py |
constopt-pytorch | constopt-pytorch-master/examples/adversarial_robustness/cifar10.py | import os
from argparse import ArgumentParser
from easydict import EasyDict
from tqdm import tqdm
import numpy as np
import torch
from torch import nn
from torch.utils.tensorboard import SummaryWriter
from torchvision.models import resnet18
import constopt
from constopt.adversary import Adversary
from constopt.op... | 6,889 | 35.455026 | 118 | py |
constopt-pytorch | constopt-pytorch-master/examples/adversarial_robustness/mnist.py | from tqdm import tqdm
import numpy as np
import torch
from torch import nn
from easydict import EasyDict
from advertorch.test_utils import LeNet5
from advertorch_examples.utils import get_mnist_train_loader
from advertorch_examples.utils import get_mnist_test_loader
import constopt
from constopt.adversary import Ad... | 5,423 | 41.708661 | 119 | py |
constopt-pytorch | constopt-pytorch-master/tests/test_adversary.py | """Testing our adversarial attacks"""
import pytest
import shutil
import torch
from torch import nn
import numpy as np
from cox.store import Store
import constopt
from constopt import optim
from constopt.adversary import Adversary
OUT_DIR = "logging/tests/test_adversary/"
shutil.rmtree(OUT_DIR, ignore_errors=True)... | 4,854 | 34.181159 | 104 | py |
constopt-pytorch | constopt-pytorch-master/tests/test_utils.py | """Tests for utility functions"""
import torch
from torch import nn
from constopt import opt_utils
# Set up random regression problem
alpha = 1.
n_samples, n_features = 20, 15
X = torch.rand((n_samples, n_features))
w = torch.rand(n_features)
w = alpha * w / sum(abs(w))
y = X.mv(w)
# Logistic regression: \|y\|_\inft... | 617 | 17.727273 | 55 | py |
constopt-pytorch | constopt-pytorch-master/tests/test_optim.py | """Tests for constrained optimizers"""
import numpy as np
import torch
from torch.autograd import Variable
import pytest
import shutil
from cox.store import Store
import constopt
from constopt import optim
OUT_DIR = "logging/tests/test_optim"
shutil.rmtree(OUT_DIR, ignore_errors=True)
MAX_ITER = 300
torch.manual_s... | 2,073 | 28.628571 | 87 | py |
constopt-pytorch | constopt-pytorch-master/constopt/optim.py | """This API is inspired by the COPT project
https://github.com/openopt/copt.
This module contains full gradient optimizers in PyTorch."""
import torch
import numpy as np
from constopt import opt_utils
def minimize_three_split(
closure,
x0,
prox1=None,
prox2=None,
tol=1e-6,
max_iter=1000,
... | 5,166 | 28.02809 | 105 | py |
constopt-pytorch | constopt-pytorch-master/constopt/stochastic.py | """This module contains stochastic first order optimizers.
These are meant to be used in replacement of optimizers such as SGD, Adam etc,
for training a model over batches of a dataset."""
import warnings
import torch
from torch.optim import Optimizer
import numpy as np
EPS = np.finfo(np.float32).eps
def backtra... | 8,968 | 32.718045 | 91 | py |
constopt-pytorch | constopt-pytorch-master/constopt/data_utils.py | from easydict import EasyDict
import torch
import torchvision
from torchvision import transforms
def ld_cifar10():
"""Load training and test data."""
transform = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
train_dataset = torchvision.... | 793 | 40.789474 | 116 | py |
constopt-pytorch | constopt-pytorch-master/constopt/adversary.py | import torch
from torch.autograd import Variable
import numpy as np
class Adversary:
def __init__(self, shape, constraint, optimizer_class,
device=None, random_init=False):
if random_init:
self.delta = Variable(constraint.random_point(shape))
else:
self.del... | 2,784 | 35.168831 | 95 | py |
constopt-pytorch | constopt-pytorch-master/constopt/data.py | import torch
import torchvision.datasets as datasets
import torch.utils.data as data
import torchvision.transforms as transforms
def load_cifar10(batch_size=100, data_dir='./data'):
transform_chain = transforms.Compose([transforms.ToTensor()])
item = datasets.CIFAR10(root=data_dir, train=False, transform=tran... | 450 | 33.692308 | 97 | py |
constopt-pytorch | constopt-pytorch-master/constopt/constraints.py | from copy import deepcopy
from collections import defaultdict
import torch
import numpy as np
from scipy.stats import expon
from torch.distributions import Laplace, Normal
# TODO: Add projections to the constraints, and write ProjectedOptimizer wrapper/decorator
"""This uses an API similar to the one for
the COPT pr... | 8,705 | 31.977273 | 90 | py |
drmad | drmad-master/cpu_ver/hyperserver/loaddataSubClass.py | import itertools
import os
import pickle
import numpy as np
from hypergrad.util import dictslice
from hypergrad.mnist import random_partition
def datapath(fname):
project_dir = os.environ['EXPERI_PROJECT_PATH']
datadir = project_dir+"/library/hypergrad/data/mnist"
# datadir = os.path.expanduser('/Users/... | 5,924 | 30.020942 | 126 | py |
TextZoom | TextZoom-master/src/dataset/dataset.py | #!/usr/bin/python
# encoding: utf-8
import random
import torch
from torch.utils.data import Dataset
from torch.utils.data import sampler
import torchvision.transforms as transforms
import lmdb
import six
import sys
import bisect
import warnings
from PIL import Image
import numpy as np
import string
sys.path.append('.... | 10,023 | 30.522013 | 149 | py |
TextZoom | TextZoom-master/src/dataset/voc_data.py | import random
import torch
from torch.utils.data import Dataset
from torch.utils.data import sampler
import torchvision.transforms as transforms
import lmdb
import six
import cv2
import sys
import os
import bisect
import warnings
from PIL import Image
import numpy as np
import string
sys.path.append('../')
from utils ... | 3,076 | 29.166667 | 152 | py |
TextZoom | TextZoom-master/src/loss/gradient_loss.py | import torch
import torch.nn.functional as F
import torch.nn as nn
import numpy as np
from PIL import Image
from IPython import embed
from torchvision import transforms
class GradientPriorLoss(nn.Module):
def __init__(self, ):
super(GradientPriorLoss, self).__init__()
self.func = nn.L1Loss()
... | 961 | 28.151515 | 89 | py |
TextZoom | TextZoom-master/src/loss/percptual_loss.py | import torch
from torch import nn
from torchvision.models.vgg import vgg16
from IPython import embed
class GeneratorLoss(nn.Module):
def __init__(self):
super(GeneratorLoss, self).__init__()
vgg = vgg16(pretrained=True)
loss_network = nn.Sequential(*list(vgg.features)[:31]... | 1,703 | 31.150943 | 104 | py |
TextZoom | TextZoom-master/src/loss/image_loss.py | import torch
import torch.nn.functional as F
import torch.nn as nn
import numpy as np
from PIL import Image
from IPython import embed
from torchvision import transforms
class ImageLoss(nn.Module):
def __init__(self, gradient=True, loss_weight=[20, 1e-4]):
super(ImageLoss, self).__init__()
self.mse... | 1,832 | 31.157895 | 105 | py |
TextZoom | TextZoom-master/src/utils/utils_moran.py | import torch
import torch.nn as nn
from torch.autograd import Variable
import collections
class strLabelConverterForAttention(object):
"""Convert between str and label.
NOTE:
Insert `EOS` to the alphabet for attention.
Args:
alphabet (str): set of the possible characters.
ignore_c... | 4,733 | 30.350993 | 136 | py |
TextZoom | TextZoom-master/src/utils/ssim_psnr.py | from math import exp
import torch
import torch.nn.functional as F
from torch.autograd import Variable
from IPython import embed
def calculate_psnr(img1, img2):
# img1 and img2 have range [0, 1]
mse = ((img1[:,:3,:,:]*255 - img2[:,:3,:,:]*255)**2).mean()
if mse == 0:
return float('inf')
retur... | 2,962 | 30.860215 | 114 | py |
TextZoom | TextZoom-master/src/utils/util.py | #!/usr/bin/python
# encoding: utf-8
import torch
import torch.nn as nn
from torch.autograd import Variable
import collections
import string
from IPython import embed
def str_filt(str_, voc_type):
alpha_dict = {
'digit': string.digits,
'lower': string.digits + string.ascii_lowercase,
'uppe... | 5,069 | 28.823529 | 136 | py |
TextZoom | TextZoom-master/src/utils/metrics.py | from __future__ import absolute_import
import numpy as np
import editdistance
import string
import math
from IPython import embed
import torch
import torch.nn.functional as F
import sys
sys.path.append('../')
from utils import to_torch, to_numpy
def _normalize_text(text):
text = ''.join(filter(lambda x: x in (st... | 6,827 | 35.709677 | 134 | py |
TextZoom | TextZoom-master/src/utils/utils_crnn.py | #!/usr/bin/python
# encoding: utf-8
import torch
import torch.nn as nn
from torch.autograd import Variable
import collections
class strLabelConverter(object):
"""Convert between str and label.
NOTE:
Insert `blank` to the alphabet for CTC.
Args:
alphabet (str): set of the possible charac... | 4,529 | 29.2 | 136 | py |
TextZoom | TextZoom-master/src/utils/labelmaps.py | from __future__ import absolute_import
import torch
import string
def get_vocabulary(voc_type, EOS='EOS', PADDING='PADDING', UNKNOWN='UNKNOWN'):
'''
voc_type: str: one of 'LOWERCASE', 'ALLCASES', 'ALLCASES_SYMBOLS'
'''
voc = None
types = ['digit', 'lower', 'upper', 'all']
if voc_type == 'digit... | 2,084 | 25.392405 | 78 | py |
TextZoom | TextZoom-master/src/interfaces/base.py | import torch
import sys
import os
from tqdm import tqdm
import math
import torch.nn as nn
import torch.optim as optim
from IPython import embed
import math
import cv2
import string
from PIL import Image
import torchvision
from torchvision import transforms
from torch.autograd import Variable
from collections import Ord... | 15,848 | 46.737952 | 127 | py |
TextZoom | TextZoom-master/src/interfaces/super_resolution.py | import torch
import sys
import time
import os
from time import gmtime, strftime
from datetime import datetime
from tqdm import tqdm
import math
import copy
from utils import util, ssim_psnr
from IPython import embed
from torchvision import transforms
from torch.autograd import Variable
import torch.nn as nn
from thop i... | 17,246 | 47.858357 | 126 | py |
TextZoom | TextZoom-master/src/model/bicubic.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class BICUBIC(object):
def __init__(self, scale_factor=2):
super(BICUBIC).__init__()
self.scale_factor = scale_factor
def __call__(self, x):
out = F.interpolate(x, scale_factor=self.scale_factor, mode='bicubic', align_... | 353 | 24.285714 | 98 | py |
TextZoom | TextZoom-master/src/model/tsrn.py | import math
import torch
import torch.nn.functional as F
from torch import nn
from collections import OrderedDict
import sys
from torch.nn import init
import numpy as np
from IPython import embed
sys.path.append('./')
sys.path.append('../')
from .recognizer.tps_spatial_transformer import TPSSpatialTransformer
from .re... | 5,214 | 33.766667 | 112 | py |
TextZoom | TextZoom-master/src/model/rdn.py | import cv2
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from torch.autograd import Variable
from IPython import embed
class sub_pixel(nn.Module):
def __init__(self, scale, act=False):
super(sub_pixel, self).__init__()
modules = []
modules.append(nn.... | 3,000 | 30.925532 | 113 | py |
TextZoom | TextZoom-master/src/model/lapsrn.py | import torch
import torch.nn as nn
import numpy as np
import math
from IPython import embed
from .recognizer.tps_spatial_transformer import TPSSpatialTransformer
from .recognizer.stn_head import STNHead
def get_upsample_filter(size):
"""Make a 2D bilinear kernel suitable for upsampling"""
factor = (size + 1) ... | 5,908 | 40.907801 | 125 | py |
TextZoom | TextZoom-master/src/model/attention_recognition_head.py | from __future__ import absolute_import
import sys
import torch
from torch import nn
from torch.nn import functional as F
from torch.nn import init
class AttentionRecognitionHead(nn.Module):
"""
input: [b x 16 x 64 x in_planes]
output: probability sequence: [b x T x num_classes]
"""
def __init__(self, num_... | 10,230 | 38.049618 | 136 | py |
TextZoom | TextZoom-master/src/model/srcnn.py | import torch
import torchvision
import torch.nn as nn
import numpy as np
import torchvision.transforms as transforms
from torch.autograd import Variable
import torchvision.datasets as d_sets
from torch.utils.data import DataLoader as d_loader
import matplotlib.pyplot as plt
from PIL import Image
from IPython import emb... | 1,961 | 32.827586 | 85 | py |
TextZoom | TextZoom-master/src/model/net.py | import math
import torch
import torch.nn.functional as F
from torch import nn
from collections import OrderedDict
import sys
from torch.nn import init
import numpy as np
from IPython import embed
sys.path.append('./')
sys.path.append('../')
from .recognizer.tps_spatial_transformer import TPSSpatialTransformer
from .re... | 4,920 | 33.65493 | 97 | py |
TextZoom | TextZoom-master/src/model/edsr.py | import torch
import torch.nn as nn
import math
from IPython import embed
class MeanShift(nn.Conv2d):
def __init__(self, rgb_mean, sign):
super(MeanShift, self).__init__(3, 3, kernel_size=1)
self.weight.data = torch.eye(3).view(3, 3, 1, 1)
self.bias.data = float(sign) * torch.Tensor(rgb_mea... | 3,067 | 32.714286 | 117 | py |
TextZoom | TextZoom-master/src/model/vdsr.py | import torch
import torch.nn as nn
from math import sqrt
from IPython import embed
import sys
sys.path.append('./')
from .recognizer.tps_spatial_transformer import TPSSpatialTransformer
from .recognizer.stn_head import STNHead
class Conv_ReLU_Block(nn.Module):
def __init__(self):
super(Conv_ReLU_Block, se... | 2,526 | 34.591549 | 119 | py |
TextZoom | TextZoom-master/src/model/srresnet.py | import math
import torch
import torch.nn.functional as F
from torch import nn
from collections import OrderedDict
import sys
sys.path.append('./')
from .recognizer.tps_spatial_transformer import TPSSpatialTransformer
from .recognizer.stn_head import STNHead
from IPython import embed
class SRResNet(nn.Module):
def... | 4,842 | 32.171233 | 97 | py |
TextZoom | TextZoom-master/src/model/esrgan.py | import functools
import torch
import torch.nn as nn
import math
import torch.nn.functional as F
from IPython import embed
def make_layer(block, n_layers):
layers = []
for _ in range(n_layers):
layers.append(block())
return nn.Sequential(*layers)
class ResidualDenseBlock_5C(nn.Module):
def __... | 5,620 | 37.765517 | 117 | py |
TextZoom | TextZoom-master/src/model/rrdb.py | import functools
import torch
import torch.nn as nn
import torch.nn.functional as F
from IPython import embed
def make_layer(block, n_layers):
layers = []
for _ in range(n_layers):
layers.append(block())
return nn.Sequential(*layers)
class ResidualDenseBlock_5C(nn.Module):
def __init__(self,... | 5,563 | 35.366013 | 117 | py |
TextZoom | TextZoom-master/src/model/crnn/crnn.py | import torch.nn as nn
class BidirectionalLSTM(nn.Module):
def __init__(self, nIn, nHidden, nOut):
super(BidirectionalLSTM, self).__init__()
self.rnn = nn.LSTM(nIn, nHidden, bidirectional=True)
self.embedding = nn.Linear(nHidden * 2, nOut)
def forward(self, input):
recurrent,... | 2,554 | 30.9375 | 78 | py |
TextZoom | TextZoom-master/src/model/moran/moran.py | import torch.nn as nn
from .morn import MORN
from .asrn_res import ASRN
class MORAN(nn.Module):
def __init__(self, nc, nclass, nh, targetH, targetW, BidirDecoder=False,
inputDataType='torch.cuda.FloatTensor', maxBatch=256, CUDA=True):
super(MORAN, self).__init__()
self.MORN = MOR... | 877 | 37.173913 | 82 | py |
TextZoom | TextZoom-master/src/model/moran/asrn_res.py | import torch
import torch.nn as nn
from torch.nn import init
import torch.nn.functional as F
from torch.autograd import Variable
from torch.nn.parameter import Parameter
from .fracPickup import fracPickup
class BidirectionalLSTM(nn.Module):
def __init__(self, nIn, nHidden, nOut):
super(BidirectionalLSTM, ... | 10,285 | 38.561538 | 135 | py |
TextZoom | TextZoom-master/src/model/moran/fracPickup.py | import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
import numpy.random as npr
class fracPickup(nn.Module):
def __init__(self, CUDA=True):
super(fracPickup, self).__init__()
self.cuda = CUDA
def forward(self, x):
x_shape = x.size()
assert ... | 1,425 | 28.102041 | 64 | py |
TextZoom | TextZoom-master/src/model/moran/morn.py | import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
class MORN(nn.Module):
def __init__(self, nc, targetH, targetW, inputDataType='torch.cuda.FloatTensor', maxBatch=256, CUDA=True):
super(MORN, self).__init__()
self.targetH = targetH
self.targetW = targ... | 6,084 | 42.464286 | 130 | py |
TextZoom | TextZoom-master/src/model/recognizer/recognizer_builder.py | from __future__ import absolute_import
from PIL import Image
import numpy as np
from collections import OrderedDict
import sys
import torch
from torch import nn
from torch.nn import functional as F
from torch.nn import init
sys.path.append('./')
from .resnet_aster import *
from .attention_recognition_head import Atte... | 3,906 | 34.518182 | 122 | py |
TextZoom | TextZoom-master/src/model/recognizer/stn_head.py | from __future__ import absolute_import
import math
import numpy as np
import sys
import torch
from torch import nn
from torch.nn import functional as F
from torch.nn import init
from IPython import embed
def conv3x3_block(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
conv_layer = nn.Conv2... | 3,635 | 32.981308 | 92 | py |
TextZoom | TextZoom-master/src/model/recognizer/attention_recognition_head.py | from __future__ import absolute_import
import sys
import torch
from torch import nn
from torch.nn import functional as F
from torch.nn import init
from IPython import embed
class AttentionRecognitionHead(nn.Module):
"""
input: [b x 16 x 64 x in_planes]
output: probability sequence: [b x T x num_classes]
"""
... | 10,415 | 37.865672 | 136 | py |
TextZoom | TextZoom-master/src/model/recognizer/resnet_aster.py | import torch
import torch.nn as nn
import torchvision
import sys
import math
#
# from config import get_args
# global_args = get_args(sys.argv[1:])
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
... | 3,924 | 28.074074 | 88 | py |
TextZoom | TextZoom-master/src/model/recognizer/sequenceCrossEntropyLoss.py | from __future__ import absolute_import
import torch
from torch import nn
from torch.autograd import Variable
import torch.nn.functional as F
def to_contiguous(tensor):
if tensor.is_contiguous():
return tensor
else:
return tensor.contiguous()
def _assert_no_grad(variable):
assert not variable.requires_g... | 2,054 | 31.109375 | 73 | py |
TextZoom | TextZoom-master/src/model/recognizer/tps_spatial_transformer.py |
from __future__ import absolute_import
import numpy as np
import itertools
import torch
import torch.nn as nn
import torch.nn.functional as F
from IPython import embed
def grid_sample(input, grid, canvas = None):
output = F.grid_sample(input, grid)
if canvas is None:
return output
else:
input_mask = ... | 4,978 | 40.840336 | 100 | py |
multitask_impute | multitask_impute-master/OmiEmbed/models/vae_survival_model.py | import torch
from .vae_basic_model import VaeBasicModel
from . import networks
from . import losses
class VaeSurvivalModel(VaeBasicModel):
"""
This class implements the VAE survival model, using the VAE framework with the survival prediction downstream task.
"""
@staticmethod
def modify_commandli... | 5,390 | 38.933333 | 151 | py |
multitask_impute | multitask_impute-master/OmiEmbed/models/losses.py | import torch
import torch.nn as nn
def get_loss_func(loss_name, reduction='mean'):
"""
Return the loss function.
Parameters:
loss_name (str) -- the name of the loss function: BCE | MSE | L1 | CE
reduction (str) -- the reduction method applied to the loss function: sum | mean
"""
... | 2,558 | 32.671053 | 176 | py |
multitask_impute | multitask_impute-master/OmiEmbed/models/vae_alltask_gn_model.py | import torch
import torch.nn as nn
from .basic_model import BasicModel
from . import networks
from . import losses
from torch.nn import functional as F
from sklearn import metrics
class VaeAlltaskGNModel(BasicModel):
"""
This class implements the VAE multitasking model with GradNorm (all tasks), using the VAE... | 17,700 | 47.231608 | 382 | py |
multitask_impute | multitask_impute-master/OmiEmbed/models/vae_regression_model.py | import torch
from sklearn import metrics
from .vae_basic_model import VaeBasicModel
from . import networks
from . import losses
class VaeRegressionModel(VaeBasicModel):
"""
This class implements the VAE regression model, using the VAE framework with the regression downstream task.
"""
@staticmethod
... | 3,793 | 37.323232 | 152 | py |
multitask_impute | multitask_impute-master/OmiEmbed/models/vae_alltask_model.py | import torch
from .vae_basic_model import VaeBasicModel
from . import networks
from . import losses
from torch.nn import functional as F
from sklearn import metrics
class VaeAlltaskModel(VaeBasicModel):
"""
This class implements the VAE multitasking model with all downstream tasks (5 classifiers + 1 regressor... | 10,265 | 49.078049 | 371 | py |
multitask_impute | multitask_impute-master/OmiEmbed/models/networks.py | import torch
import torch.nn as nn
import functools
from torch.nn import init
from torch.optim import lr_scheduler
# Class components
class DownSample(nn.Module):
"""
SingleConv1D module + MaxPool
The output dimension = input dimension // down_ratio
"""
def __init__(self, input_chan_num, output_c... | 105,720 | 45.799911 | 202 | py |
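
Each row above pairs a raw `code` string with three derived statistics (`file_length`, `avg_line_length`, `max_line_length`) and the file extension. The following is a minimal sketch of how such statistics could be recomputed from one record, assuming the dump is stored as JSON Lines with the column names from the header; the path `code_dump.jsonl`, the helper names, and the exact averaging convention are hypothetical, not taken from the source.

```python
import json

def iter_records(path="code_dump.jsonl"):
    # Yield one record per JSONL line; keys match the table header above
    # (repo, file, code, file_length, avg_line_length, max_line_length, extension_type).
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            yield json.loads(line)

def recompute_stats(record):
    # Recompute per-file statistics from the raw "code" string
    # (one plausible definition; may not match the dump's exact convention).
    lines = record["code"].splitlines() or [""]
    return {
        "file_length": len(record["code"]),
        "avg_line_length": sum(len(l) for l in lines) / len(lines),
        "max_line_length": max(len(l) for l in lines),
    }

if __name__ == "__main__":
    for rec in iter_records():
        print(rec["repo"], rec["file"], recompute_stats(rec))
```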