| repo (string, len 1–99) | file (string, len 13–215) | code (string, len 12–59.2M) | file_length (int64, 12–59.2M) | avg_line_length (float64, 3.82–1.48M) | max_line_length (int64, 12–2.51M) | extension_type (1 class) |
|---|---|---|---|---|---|---|
Generalization-and-Memorization-in-Sparse-Training | Generalization-and-Memorization-in-Sparse-Training-main/optim/model.py | """
Title: model.py
Description: The main classes for models, which will load the trainer and save results.
Note: Need to check compatibility.
"""
try:
import torch_xla.core.xla_model as xm
except:
pass
from .trainer import Trainer
from .trainer_noisy import NoisyTrainer
from helper import utils, algo
from ne... | 7,772 | 35.838863 | 92 | py |
Generalization-and-Memorization-in-Sparse-Training | Generalization-and-Memorization-in-Sparse-Training-main/optim/trainer_noisy.py | """
Title: trainer_noisy.py
Description: A simple trainer for the noisy setting.
"""
from helper import utils, algo
from .base_trainer import BaseTrainer
from torch.nn import functional as F
from torch.optim.lr_scheduler import MultiStepLR
from torch.optim.lr_scheduler import ExponentialLR
from nngeometry.object import PM... | 20,279 | 45.090909 | 93 | py |
Generalization-and-Memorization-in-Sparse-Training | Generalization-and-Memorization-in-Sparse-Training-main/optim/trainer.py | """
Title: trainer.py
Description: A simple trainer.
"""
from helper import utils, algo
from .base_trainer import BaseTrainer
from helper.regularizer import JacobianReg
from torch.nn import functional as F
from torch.optim.lr_scheduler import MultiStepLR
from torch.optim.lr_scheduler import ExponentialLR
from nngeomet... | 19,896 | 41.881466 | 93 | py |
Generalization-and-Memorization-in-Sparse-Training | Generalization-and-Memorization-in-Sparse-Training-main/helper/algo.py | """
[Title] algo.py
[Use] A helper file for training algorithms.
"""
from torch import nn
import torch
import torch.nn.functional as F
def top_k_idx(vec,
k: int=128,
largest: bool=True):
"""
Returns the idx (indices) of the k largest/smallest entries in vec.
Args:
v... | 2,088 | 25.782051 | 77 | py |
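The cell above truncates before the function body. A minimal sketch of the behaviour the docstring describes, built on `torch.topk` (the flattening step and the exact return shape are assumptions, not the repo's verified code):

```python
import torch

# Hypothetical reconstruction of top_k_idx; the original body is truncated above.
def top_k_idx(vec: torch.Tensor, k: int = 128, largest: bool = True) -> torch.Tensor:
    # torch.topk returns (values, indices); only the indices are needed here
    return torch.topk(vec.flatten(), k=k, largest=largest).indices
```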
Generalization-and-Memorization-in-Sparse-Training | Generalization-and-Memorization-in-Sparse-Training-main/helper/hessian.py | """
Title: hessian.py
Description: Helper function for calculating hessian eigenvalues.
Source:
Vardan Papyan, The Full Spectrum of Deepnet Hessians at Scale,
https://github.com/AnonymousNIPS2019/DeepnetHessian,
NIPS 2019.
"""
import sys
import torch
import numpy as np
import torch.nn as nn
import torch.nn... | 18,376 | 29.076923 | 142 | py |
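Hessian spectrum tools like the one credited above never materialise the full Hessian; they rely on Hessian-vector products computed with two backward passes. A self-contained sketch of that primitive (the repo's Lanczos/spectrum code itself is truncated):

```python
import torch

def hvp(loss, params, vec):
    # create_graph=True keeps the graph alive so we can differentiate again;
    # grad of (g . v) w.r.t. params is the Hessian-vector product H @ v
    grads = torch.autograd.grad(loss, params, create_graph=True)
    flat = torch.cat([g.reshape(-1) for g in grads])
    return torch.autograd.grad(flat @ vec, params, retain_graph=True)
```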
Generalization-and-Memorization-in-Sparse-Training | Generalization-and-Memorization-in-Sparse-Training-main/helper/pruner.py | """
[Title] pruner.py
[Description] The simplest prune schedule by PyTorch.
"""
from torch import nn
from functools import reduce
import torch.nn.utils.prune as torch_prune
def global_prune(net: nn.Module,
prune_method: str='l1',
prune_ratio: float=0.6,
prune_last: ... | 4,181 | 31.671875 | 86 | py |
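`global_prune`'s body is cut off. Given the `torch.nn.utils.prune` import and the `'l1'` default, a hedged sketch using PyTorch's built-in global unstructured pruning (the conv/linear filter is an assumption) is:

```python
import torch.nn as nn
import torch.nn.utils.prune as torch_prune

def global_prune_sketch(net: nn.Module, prune_ratio: float = 0.6) -> None:
    # rank all conv/linear weights by |w| across the whole network and
    # zero out the smallest prune_ratio fraction
    params = [(m, 'weight') for m in net.modules()
              if isinstance(m, (nn.Conv2d, nn.Linear))]
    torch_prune.global_unstructured(
        params,
        pruning_method=torch_prune.L1Unstructured,
        amount=prune_ratio,
    )
```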
Generalization-and-Memorization-in-Sparse-Training | Generalization-and-Memorization-in-Sparse-Training-main/helper/utils.py | """
[Title] utils.py
[Use] A general helper file.
[TOC] 1. General helper functions;
2. Helpers for networks;
3. Helpers for optimizers;
4. Calculating SNR;
5. Calculating Fisher information.
"""
from network.main import build_network
from .pruner import global_prune
from nngeometry.layercolle... | 44,745 | 31.925681 | 94 | py |
Generalization-and-Memorization-in-Sparse-Training | Generalization-and-Memorization-in-Sparse-Training-main/helper/regularizer.py | """
[Title] regularizer.py
[Description] Different implementations of regularizers.
"""
from __future__ import division
import torch
import torch.nn as nn
import torch.autograd as autograd
class JacobianReg(nn.Module):
"""
Intuitively, Jacobian regularization is a model-agnostic way to
increase the class... | 2,966 | 29.587629 | 74 | py |
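The `JacobianReg` docstring is truncated. The usual formulation (Hoffman et al., 2019) penalises the Frobenius norm of the input-output Jacobian, estimated with a random projection so only one extra backward pass per batch is needed. A minimal sketch of that estimator, not the repo's exact class:

```python
import torch

def jacobian_penalty(x: torch.Tensor, out: torch.Tensor) -> torch.Tensor:
    # x must have requires_grad=True before the forward pass producing out
    v = torch.randn_like(out)
    v = v / v.norm(dim=-1, keepdim=True)        # random unit projection
    (jv,) = torch.autograd.grad(out, x, grad_outputs=v, create_graph=True)
    # E_v ||J^T v||^2 is proportional to ||J||_F^2
    return jv.pow(2).sum() / x.shape[0]
```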
Generalization-and-Memorization-in-Sparse-Training | Generalization-and-Memorization-in-Sparse-Training-main/helper/plotter.py | """
[Title] plotter.py
[Description] The function will be directly called in run.py to generate plots.
"""
from pathlib import Path
import torch
import joblib
import seaborn as sea
import matplotlib.pyplot as plt
import numpy as np
# ##########################################################
# Helper Function to smoo... | 18,683 | 38.837953 | 87 | py |
Generalization-and-Memorization-in-Sparse-Training | Generalization-and-Memorization-in-Sparse-Training-main/network/base_net.py | """
Title: base_net.py
Description: The base network.
Reference: https://github.com/lukasruff/Deep-SAD-PyTorch/tree/master/src/networks
"""
import logging
import numpy as np
import torch.nn as nn
# #########################################################################
# 1. Base Net
# #############################... | 897 | 27.0625 | 81 | py |
Generalization-and-Memorization-in-Sparse-Training | Generalization-and-Memorization-in-Sparse-Training-main/network/mnist_alexnet.py | """
Title: mnist_alexnet.py
Description: The file for alexnet of mnist.
"""
from .base_net import BaseNet
import torch.nn as nn
class MNISTAlexNet(BaseNet):
def __init__(self):
super().__init__()
self.layer1 = nn.Sequential(
nn.Conv2d(1, 32, kernel_size=3, padding=1), # 32 * 28 * 28
... | 1,544 | 28.150943 | 72 | py |
Generalization-and-Memorization-in-Sparse-Training | Generalization-and-Memorization-in-Sparse-Training-main/network/dense_net.py | """
Title: cifar_densenet.py
Description: The file for densenet of cifar.
Warning: file not tested yet! (Feb 13, 2022)
"""
from .base_net import BaseNet
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from torch.autograd import Variable
class Bottleneck(nn.Module):
def __init__... | 4,851 | 30.921053 | 99 | py |
Generalization-and-Memorization-in-Sparse-Training | Generalization-and-Memorization-in-Sparse-Training-main/network/mlp.py | """
Title: mlp.py
Description: The file for a fully connected network.
"""
from .base_net import BaseNet
from helper import utils
import torch.nn as nn
class MLP(BaseNet):
def __init__(self,
in_dim: int=12,
out_dim: int=2,
hidden_act: str='tanh',
... | 1,256 | 27.568182 | 79 | py |
Generalization-and-Memorization-in-Sparse-Training | Generalization-and-Memorization-in-Sparse-Training-main/network/mnist_lenet.py | """
Title: mnist_lenet.py
Description: The file for lenet of mnist.
Warning: file not tested yet! (Feb 13, 2022)
"""
from .base_net import BaseNet
import torch.nn as nn
class MNISTLeNet(BaseNet):
def __init__(self):
super(MNISTLeNet, self).__init__()
self.cnn_model = nn.Sequential( ... | 1,201 | 33.342857 | 104 | py |
Generalization-and-Memorization-in-Sparse-Training | Generalization-and-Memorization-in-Sparse-Training-main/network/res_net.py | """
Title: cifar_resnet.py
Description: The file for resnet of cifar.
Warning: file not tested yet! (Feb 13, 2022)
"""
from __future__ import absolute_import
from .base_net import BaseNet
import math
import torch.nn as nn
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return... | 4,675 | 29.562092 | 78 | py |
Generalization-and-Memorization-in-Sparse-Training | Generalization-and-Memorization-in-Sparse-Training-main/network/preres_net.py | from .base_net import BaseNet
import math
import torch.nn as nn
__all__ = ['preresnet']
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module)... | 4,353 | 28.619048 | 77 | py |
Generalization-and-Memorization-in-Sparse-Training | Generalization-and-Memorization-in-Sparse-Training-main/network/vgg_net.py | '''VGG for CIFAR10. FC layers are removed.
(c) YANG, Wei
'''
from .base_net import BaseNet
import torch.nn as nn
import math
__all__ = [
'VGG', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn',
'vgg19_bn', 'vgg19',
]
model_urls = {
'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30a... | 2,452 | 30.050633 | 108 | py |
Generalization-and-Memorization-in-Sparse-Training | Generalization-and-Memorization-in-Sparse-Training-main/network/alex_net.py | """
Title: cifar_alexnet.py
Description: The file for alexnet of cifar.
Warning: file not tested yet! (Feb 13, 2022)
"""
from .base_net import BaseNet
import torch
import torch.nn as nn
import torch.nn.functional as F
class AlexNet(BaseNet):
def __init__(self,
out_dim=100):
super(AlexN... | 1,208 | 30.815789 | 66 | py |
Generalization-and-Memorization-in-Sparse-Training | Generalization-and-Memorization-in-Sparse-Training-main/loader/loader_fmnist_noisy.py | """
Title: fMNIST_loader.py
Description: The loader classes for the FashionMNIST datasets.
Note: Haven't tested the file yet! (Feb 13, 2022)
"""
from .loader_base import BaseLoader
from PIL import Image
from torch.utils.data import DataLoader
from torch.utils.data import ConcatDataset
from torchvision.datasets import FashionMN... | 9,328 | 34.743295 | 88 | py |
Generalization-and-Memorization-in-Sparse-Training | Generalization-and-Memorization-in-Sparse-Training-main/loader/loader_cifar100.py | """
Title: CIFAR100_loader.py
Description: The loader classes for the CIFAR-100 datasets
Note: Haven't tested the file yet! (Feb 13, 2022)
"""
from .loader_base import BaseLoader
from PIL import Image
from torch.utils.data import DataLoader
from torch.utils.data import ConcatDataset
from torchvision.datasets import CIFAR... | 8,049 | 37.888889 | 88 | py |
Generalization-and-Memorization-in-Sparse-Training | Generalization-and-Memorization-in-Sparse-Training-main/loader/loader_toy.py | """
Title: loader_toy.py
Description: Loading pickled toy datasets.
"""
from .loader_base import BaseLoader
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
from sklearn.model_selection import train_test_split
import os
import torch
import joblib
# #######################################... | 3,714 | 34.721154 | 91 | py |
Generalization-and-Memorization-in-Sparse-Training | Generalization-and-Memorization-in-Sparse-Training-main/loader/loader_cifar10_noisy.py | """
Title: loader_cifar10_noisy.py
Description: The loader classes for the CIFAR-10 datasets.
"""
from .loader_base import BaseLoader
from PIL import Image
from torch.utils.data import Subset, DataLoader
from torchvision.datasets import CIFAR10
import torchvision
import numpy as np
import torch
import torchvision.tran... | 9,276 | 34.408397 | 88 | py |
Generalization-and-Memorization-in-Sparse-Training | Generalization-and-Memorization-in-Sparse-Training-main/loader/loader_cifar10.py | """
Title: cifar10_loader.py
Description: The loader classes for the CIFAR-10 datasets
Note: Haven't tested the file yet! (Feb 13, 2022)
"""
from .loader_base import BaseLoader
from PIL import Image
from torch.utils.data import DataLoader
from torch.utils.data import ConcatDataset
from torchvision.datasets import CIFAR1... | 4,809 | 36.286822 | 88 | py |
Generalization-and-Memorization-in-Sparse-Training | Generalization-and-Memorization-in-Sparse-Training-main/loader/loader_tiny_imagenet.py | """
Title: loader_tiny_imagenet.py
Description: The loader classes for the imagenet datasets.
"""
from .loader_base import BaseLoader
from pathlib import Path
from torch.utils.data import DataLoader
from torchvision.datasets import ImageFolder
from torch.utils.data.distributed import DistributedSampler
import os
impor... | 4,865 | 35.044444 | 91 | py |
Generalization-and-Memorization-in-Sparse-Training | Generalization-and-Memorization-in-Sparse-Training-main/loader/loader_fmnist.py | """
Title: fMNIST_loader.py
Description: The loader classes for the FashionMNIST datasets.
Note: Haven't tested the file yet! (Feb 13, 2022)
"""
from .loader_base import BaseLoader
from PIL import Image
from torch.utils.data import DataLoader
from torch.utils.data import ConcatDataset
from torchvision.datasets import FashionMN... | 3,247 | 34.304348 | 81 | py |
Generalization-and-Memorization-in-Sparse-Training | Generalization-and-Memorization-in-Sparse-Training-main/loader/loader_base.py | """
Title: loader_base.py
Description: The base loader.
"""
from abc import ABC, abstractmethod
# #########################################################################
# 1. Base Loader
# #########################################################################
class BaseLoader(ABC):
def __init... | 987 | 28.939394 | 75 | py |
Generalization-and-Memorization-in-Sparse-Training | Generalization-and-Memorization-in-Sparse-Training-main/loader/loader_mnist.py | """
Title: MNIST_loader.py
Description: The loader classes for the MNIST datasets.
Note: Haven't tested the file yet! (Feb 13, 2022)
"""
from .loader_base import BaseLoader
from PIL import Image
from torch.utils.data import DataLoader
from torch.utils.data import ConcatDataset
from torchvision.datasets import MNIST
imp... | 3,030 | 33.443182 | 81 | py |
AdaQP | AdaQP-master/AdaQP/trainer/runtime_util.py | import logging
import time
import torch
from typing import Any, List, Tuple, Union
from dgl import DGLHeteroGraph
from torch import Tensor
from torch import nn
from torch.optim import Optimizer
import numpy as np
from ..helper import BitType
from ..communicator import Communicator as comm
from ..manager import GraphEn... | 7,543 | 35.980392 | 256 | py |
AdaQP | AdaQP-master/AdaQP/trainer/trainer.py | import os
import csv
import yaml
import torch
from argparse import Namespace
from typing import Dict, Tuple
from .runtime_util import *
from ..helper import DistGNNType, BitType
from ..model import DistGCN, DistSAGE
from ..communicator import Communicator as comm
from ..manager import GraphEngine as engine
# supporte... | 12,299 | 49.204082 | 343 | py |
AdaQP | AdaQP-master/AdaQP/manager/processing.py | import dgl
import torch
import logging
from torch import Tensor
from dgl.distributed import GraphPartitionBook
from typing import Dict, Tuple
import numpy as np
from ..communicator import Communicator as comm
from ..communicator import Basic_Buffer_Type
from ..helper import DistGNNType
logger = logging.getLogger('tra... | 6,117 | 52.2 | 239 | py |
AdaQP | AdaQP-master/AdaQP/manager/conversion.py | import dgl
import torch
from dgl import DGLHeteroGraph
from dgl.distributed import GraphPartitionBook
from torch import Tensor, BoolTensor
from typing import Dict, Tuple
from ..communicator import Communicator as comm
from ..communicator import Basic_Buffer_Type
'''
*************************************************
*... | 9,200 | 51.87931 | 240 | py |
AdaQP | AdaQP-master/AdaQP/manager/graphEngine.py | import dgl
from multiprocessing import Event
from multiprocessing.pool import ThreadPool
from typing import List, Tuple, Union
from dgl import DGLHeteroGraph
from torch import Tensor
from ..util import Timer, Recorder
from .conversion import *
from .processing import *
from ..communicator import Communicator as comm
f... | 9,381 | 39.791304 | 169 | py |
AdaQP | AdaQP-master/AdaQP/communicator/comm.py | import os
import logging
import torch
import torch.distributed as dist
from torch import Tensor
from typing import Dict, List, Any, Tuple
from queue import Queue
from .buffer import CommBuffer, Basic_Buffer_Type
from ..helper import MessageType
logger = logging.getLogger('trainer')
class Communicator(object):
''... | 8,586 | 32.940711 | 199 | py |
AdaQP | AdaQP-master/AdaQP/communicator/buffer.py | import torch
import logging
from torch import Tensor
from typing import Dict, List, Tuple, Union, NewType
import torch.distributed as dist
from ..helper import BitType
logger = logging.getLogger('trainer')
# typing definition
# buffer structure: (pid->messages/(messages, params))
Basic_Buffer_Type = NewType('Basic_Bu... | 14,201 | 52.592453 | 164 | py |
AdaQP | AdaQP-master/AdaQP/util/timer.py | import time
import torch
import os
from contextlib import contextmanager
from ..helper import BitType
from ..communicator import Communicator as comm
class Timer(object):
def __init__(self, device: torch.device):
super(Timer, self).__init__()
self._record = {}
self._total_record = []
... | 2,279 | 33.029851 | 110 | py |
AdaQP | AdaQP-master/AdaQP/util/recorder.py | import time
import logging
import torch
from typing import Any, List, Union
logger = logging.getLogger('trainer')
class Recorder(object):
def __init__(self, epoches: int):
self.epoches_metrics = torch.zeros(epoches, 3) # store each epoch's train/val/test metrics
def add_new_metrics(self, epoch_count... | 1,965 | 48.15 | 252 | py |
AdaQP | AdaQP-master/AdaQP/util/quantization/setup.py | from setuptools import setup, find_packages
from torch.utils import cpp_extension
setup(name='quant_cuda',
ext_modules=[
cpp_extension.CUDAExtension(
'quant_cuda',
['src/quantization.cc',
'src/quantization_cuda_kernel.cu'],
extra_compile_args={'n... | 475 | 28.75 | 69 | py |
AdaQP | AdaQP-master/AdaQP/helper/dataset.py | import os
import ssl
import sys
import urllib
import json
import dgl
import torch
from dgl.data.dgl_dataset import DGLDataset
from dgl import DGLHeteroGraph
from typing import Optional
from sklearn.preprocessing import StandardScaler
import numpy as np
import scipy.sparse as sp
# Amazon dataset
def download_url(url: s... | 6,230 | 37.462963 | 138 | py |
AdaQP | AdaQP-master/AdaQP/helper/partition.py | import os
import dgl
import torch
from dgl import DGLHeteroGraph
from ogb.nodeproppred import DglNodePropPredDataset
from dgl.data import RedditDataset
from .dataset import AmazonProducts, load_yelp
def process_obg_dataset(dataset: str, raw_dir: str) -> DGLHeteroGraph:
'''
process the ogb dataset, return a dg... | 2,853 | 37.567568 | 115 | py |
AdaQP | AdaQP-master/AdaQP/model/distGCN.py | import torch
from typing import Any, Union
from torch import Tensor
from dgl import DGLHeteroGraph
from torch.nn.parameter import Parameter
from torch.nn import init
import torch.nn as nn
import torch.nn.functional as F
from .ops import DistAggConv
from ..manager import DecompGraph
class DistGCNConv(nn.Module):
'... | 3,276 | 37.552941 | 128 | py |
AdaQP | AdaQP-master/AdaQP/model/op_util.py | import torch
from typing import Dict, Tuple
from functools import wraps
from typing import Tuple
from torch import Tensor
import quant_cuda as integer_quantizer
from ..helper import BitType
from ..communicator import Basic_Buffer_Type
from ..communicator import Communicator as comm
from ..manager import GraphEngine as... | 10,260 | 41.053279 | 270 | py |
AdaQP | AdaQP-master/AdaQP/model/distSAGE.py | import dgl
import torch
from typing import Any, Union
from torch import Tensor
from dgl import DGLHeteroGraph
from torch.nn.parameter import Parameter
from torch.nn import init
import torch.nn as nn
import torch.nn.functional as F
from .ops import DistAggSAGE
from ..manager import DecompGraph
class DistSAGEConv(nn.M... | 3,968 | 39.917526 | 160 | py |
AdaQP | AdaQP-master/AdaQP/model/ops.py | import dgl
import torch
from typing import Any, Tuple, Union
from functools import wraps
from dgl import DGLHeteroGraph
from torch import Tensor
from contextlib import contextmanager
from torch.autograd import Function
from torch.cuda.amp import custom_fwd, custom_bwd
from dgl import function as fn
from .op_util impor... | 10,847 | 55.207254 | 224 | py |
AdaQP | AdaQP-master/AdaQP/assigner/profile.py | import time
import torch
from torch import Tensor
from typing import Dict, List, Tuple
import numpy as np
from ..helper import MessageType
from ..communicator import BITS_SET
from ..communicator import Communicator as comm
from ..manager import GraphEngine as engine
'''
***********************************************... | 4,863 | 44.457944 | 150 | py |
AdaQP | AdaQP-master/AdaQP/assigner/assigner.py | import time
import logging
import torch
from typing import Dict, List, Tuple, Union
from itertools import chain
from multiprocessing.pool import ThreadPool
from queue import Queue
from torch import Tensor
import numpy as np
import pulp as plp
from .profile import *
from ..helper import BitType
from ..communicator imp... | 24,597 | 55.28833 | 266 | py |
automatic-placenta-segmentation | automatic-placenta-segmentation-main/losses.py | import numpy as np
import torch.nn.functional as F
import torch
import sys
def boundary_weighted_loss(loss_function, output, target, boundaries_add_factor=None, patch_size=(7,7,7), just_boundary=False, out_boundary_factor=None):
"""
Params:
loss_function: instantiated class of the loss function
... | 3,107 | 42.166667 | 153 | py |
automatic-placenta-segmentation | automatic-placenta-segmentation-main/data_loader.py | import numpy as np
import nibabel as nib
import torch.utils.data as data
import torch
import os
import os.path
import util
from torchvision import transforms
import torchio as tio
import multiprocessing
from os.path import exists
#data loader
num_workers = 8
print('NUM WORKERS: '+str(num_workers))
SEGMENTATION_KEY = "... | 22,133 | 45.5 | 286 | py |
automatic-placenta-segmentation | automatic-placenta-segmentation-main/torchio_transforms.py |
from torchio import RandomElasticDeformation, RandomAffine, RandomFlip, RandomNoise, RandomMotion, RandomSpike, RandomBiasField, RandomBlur, RandomGamma
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torch
import sys
import torchio as tio
from torchio.transforms.augmentation.intensi... | 11,486 | 37.29 | 192 | py |
automatic-placenta-segmentation | automatic-placenta-segmentation-main/util.py | import numpy as np
from numpy.core.fromnumeric import shape
from scipy.ndimage import zoom
import nibabel as nib
import os
import torch
from torch.nn.functional import avg_pool3d
import torchvision.transforms as transforms
import shutil
import sys
import pandas as pd
import zipfile
import math
from torchio_transforms ... | 20,647 | 33.016474 | 196 | py |
automatic-placenta-segmentation | automatic-placenta-segmentation-main/metrics.py | import numpy as np
import torch
from medpy.metric.binary import assd as ASSD
from medpy.metric.binary import hd as Hausdorff_Distance
from medpy.metric.binary import hd95 as Hausdorff_Distance_95
def metric_time_series(img_4D,metric="None",voxel_spacing=1):
'''
img_4D: 4D time series
metric: "dice", "hausd... | 3,602 | 34.323529 | 98 | py |
automatic-placenta-segmentation | automatic-placenta-segmentation-main/unet_3d.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class UNet(nn.Module):
def __init__(self, in_channels=1, squeeze=False):
super(UNet, self).__init__()
self.conv1 = Conv(in_channels, 64)
self.down1 = Down(64, 128)
self.down2 = Down(128, 256)
self.down3 = Dow... | 2,406 | 26.044944 | 89 | py |
automatic-placenta-segmentation | automatic-placenta-segmentation-main/run_model_timeseries.py | import numpy as np
import os
import torch
import torch.nn as nn
from unet_3d import UNet
import util
import argparse
import csv
from data_loader import DataLoader as DataLoaderInference
import postprocess
import metrics
MODEL_NAME = 'model_PIPPI.pt'
IMG_DIR_NAME = 'image'
LABEL_DIR_NAME = 'image'
PAD_FACTOR = 16 #fact... | 7,957 | 46.369048 | 262 | py |
automatic-placenta-segmentation | automatic-placenta-segmentation-main/train_placenta.py | import util
from util import split_train_val
from losses import boundary_weighted_loss
from metrics import dice_tensor
from unet_3d import UNet
from monai.losses.dice import DiceLoss, FocalLoss
import numpy as np
import torch
import torch.nn as nn
import os
import torchvision.transforms as transforms
import argparse
im... | 21,754 | 49.710956 | 198 | py |
automatic-placenta-segmentation | automatic-placenta-segmentation-main/run_model.py | import numpy as np
import os
import torch
import torch.nn as nn
from unet_3d import UNet
import torch.nn as nn
import util
from train_placenta import split_train_val
import argparse
import csv
import postprocess
import metrics
from metrics import dice
from medpy.metric.binary import assd as ASSD
from medpy.metric.binar... | 10,049 | 43.866071 | 297 | py |
rna-state-inf | rna-state-inf-master/rnn.py | import argparse
import numpy as np
import os
import keras as k
import tools
import makebatches
import sys
from keras.models import Sequential, load_model
from keras.layers import Bidirectional, Dropout, Dense, Conv1D, BatchNormalization
from keras.layers.recurrent import LSTM
from keras.layers.wrappers import TimeDi... | 4,774 | 31.263514 | 99 | py |
rna-state-inf | rna-state-inf-master/makebatches.py | import numpy as np
import keras
from keras.utils import to_categorical
def getallsamples(path):
f = open(path, 'r')
sequences = []
states = []
for i, line in enumerate(f):
if i % 5 == 1:
sequences.append(line.rstrip().split(' '))
if i % 5 == 3:
... | 4,053 | 29.481203 | 136 | py |
layer-rotation-tools | layer-rotation-tools-master/keras/layer_rotation_monitoring.py | '''
Methods for recording and plotting layer rotation curves
'''
import numpy as np
from scipy.spatial.distance import cosine
import matplotlib
import matplotlib.pyplot as plt
from keras.callbacks import Callback
import keras.backend as K
from keras.losses import categorical_crossentropy
def get_kernel_layer_names(... | 3,097 | 33.808989 | 124 | py |
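Given the `cosine` import above, a layer rotation curve presumably tracks the cosine distance between each layer's current kernel and its value at initialisation. A minimal sketch of that per-layer measurement:

```python
import numpy as np
from scipy.spatial.distance import cosine

def layer_rotation(w_now: np.ndarray, w_init: np.ndarray) -> float:
    # cosine distance = 1 - cos(angle); 0 means no rotation since init
    return cosine(w_now.flatten(), w_init.flatten())
```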
layer-rotation-tools | layer-rotation-tools-master/keras/layer_rotation_control.py | '''
Code for applying Layca on SGD, Adam, RMSprop and Adagrad.
Source: code is based on keras' implementation of the original optimization methods.
'''
from keras.optimizers import Optimizer
import keras.backend as K
from keras.legacy import interfaces
import numpy as np
def norm(w):
'''
computes frobenius n... | 14,034 | 39.681159 | 158 | py |
ABSA-QUAD | ABSA-QUAD-master/main.py | # -*- coding: utf-8 -*-
import argparse
import os
import logging
import time
import pickle
from tqdm import tqdm
import torch
from torch.utils.data import DataLoader
import pytorch_lightning as pl
from pytorch_lightning import seed_everything
from transformers import AdamW, T5ForConditionalGeneration, T5Tokenizer
# ... | 14,379 | 36.941953 | 117 | py |
ABSA-QUAD | ABSA-QUAD-master/data_utils.py | # -*- coding: utf-8 -*-
# This script contains all data transformation and reading
import random
from torch.utils.data import Dataset
senttag2word = {'POS': 'positive', 'NEG': 'negative', 'NEU': 'neutral'}
senttag2opinion = {'POS': 'great', 'NEG': 'bad', 'NEU': 'ok'}
sentword2opinion = {'positive': 'great', 'negativ... | 5,857 | 30.664865 | 94 | py |
digen | digen-main/setup.py | #!/usr/bin/env python
from setuptools import setup, find_packages
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name='digen',
version='0.0.5',
author='Patryk Orzechowski',
author_email=('patryk.orzechowski@gmail.com'),
packages=['digen'],
package_dir={'digen' : ... | 2,155 | 34.344262 | 106 | py |
digen | digen-main/docker/replicate.py | import sys
import numpy as np
import pandas as pd
import re
import random
import itertools
import operator
import argparse
import inspect
from deap import base, tools, gp, creator
from digen import Benchmark, defaults
from xgboost import XGBClassifier
from sklearn.ensemble import GradientBoostingClassifier, RandomFore... | 1,844 | 30.271186 | 148 | py |
places365 | places365-master/convert_python36.py | import torch
from torch.autograd import Variable as V
import torchvision.models as models
from torchvision import transforms as trn
from torch.nn import functional as F
archs = ['resnet50','densenet161','alexnet']
for arch in archs:
model_file = 'whole_%s_places365.pth.tar' % arch
save_file = 'whole_%s_places... | 719 | 33.285714 | 99 | py |
places365 | places365-master/demo_pytorch_CAM.py | import torch
from torch.autograd import Variable as V
import torchvision.models as models
import skimage.io
from torchvision import transforms as trn
from torch.nn import functional as F
import os
import numpy as np
import cv2
# function to load exif of image
from PIL import Image, ExifTags
def imreadRotate(fn):
i... | 8,590 | 36.030172 | 110 | py |
places365 | places365-master/convert_model.py | import torch
from torch.autograd import Variable as V
import torchvision.models as models
from PIL import Image
from torchvision import transforms as trn
from torch.nn import functional as F
import os
# th architecture to use
arch = 'resnet18'
model_weight = '/data/vision/torralba/deepscene/moments/models/2stream-simp... | 1,043 | 37.666667 | 156 | py |
places365 | places365-master/run_placesCNN_unified.py | # PlacesCNN to predict the scene category, attribute, and class activation map in a single pass
# by Bolei Zhou, sep 2, 2017
# updated, making it compatible to pytorch 1.x in a hacky way
import torch
from torch.autograd import Variable as V
import torchvision.models as models
from torchvision import transforms as trn
... | 6,788 | 34.176166 | 137 | py |
places365 | places365-master/train_placesCNN.py | # this code is modified from the pytorch example code: https://github.com/pytorch/examples/blob/master/imagenet/main.py
# after the model is trained, you might use convert_model.py to remove the data parallel module to make the model as standalone weight.
#
# Bolei Zhou
import argparse
import os
import shutil
import t... | 10,646 | 35.214286 | 135 | py |
places365 | places365-master/run_placesCNN_basic.py | # PlacesCNN for scene classification
#
# by Bolei Zhou
# last modified by Bolei Zhou, Dec.27, 2017 with latest pytorch and torchvision (upgrade your torchvision please if there is trn.Resize error)
import torch
from torch.autograd import Variable as V
import torchvision.models as models
from torchvision import transfo... | 2,082 | 30.089552 | 142 | py |
places365 | places365-master/wideresnet.py | import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/r... | 6,691 | 29.981481 | 95 | py |
places365 | places365-master/docker/run_scene.py | import numpy as np
import sys
import caffe
import pickle
def classify_scene(fpath_design, fpath_weights, fpath_labels, im):
# initialize net
net = caffe.Net(fpath_design, fpath_weights, caffe.TEST)
# load input and configure preprocessing
transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape... | 1,434 | 26.596154 | 131 | py |
self-supervised | self-supervised-master/test.py | import torch
from datasets import get_ds
from cfg import get_cfg
from methods import get_method
from eval.sgd import eval_sgd
from eval.knn import eval_knn
from eval.lbfgs import eval_lbfgs
from eval.get_data import get_data
if __name__ == "__main__":
cfg = get_cfg()
model_full = get_method(cfg.method)(cfg)... | 1,187 | 28.7 | 64 | py |
self-supervised | self-supervised-master/model.py | import torch.nn as nn
from torchvision import models
def get_head(out_size, cfg):
""" creates projection head g() from config """
x = []
in_size = out_size
for _ in range(cfg.head_layers - 1):
x.append(nn.Linear(in_size, cfg.head_size))
if cfg.add_bn:
x.append(nn.BatchNorm1... | 940 | 30.366667 | 86 | py |
self-supervised | self-supervised-master/cfg.py | from functools import partial
import argparse
from torchvision import models
import multiprocessing
from datasets import DS_LIST
from methods import METHOD_LIST
def get_cfg():
""" generates configuration from user input in console """
parser = argparse.ArgumentParser(description="")
parser.add_argument(
... | 5,224 | 33.150327 | 88 | py |
self-supervised | self-supervised-master/train.py | from tqdm import trange, tqdm
import numpy as np
import wandb
import torch
import torch.optim as optim
from torch.optim.lr_scheduler import MultiStepLR, CosineAnnealingWarmRestarts
import torch.backends.cudnn as cudnn
from cfg import get_cfg
from datasets import get_ds
from methods import get_method
def get_schedule... | 2,546 | 30.8375 | 89 | py |
self-supervised | self-supervised-master/methods/base.py | import torch.nn as nn
from model import get_model, get_head
from eval.sgd import eval_sgd
from eval.knn import eval_knn
from eval.get_data import get_data
class BaseMethod(nn.Module):
"""
Base class for self-supervised loss implementation.
It includes encoder and head for training, evaluation func... | 1,447 | 31.177778 | 71 | py |
self-supervised | self-supervised-master/methods/norm_mse.py | import torch.nn.functional as F
def norm_mse_loss(x0, x1):
x0 = F.normalize(x0)
x1 = F.normalize(x1)
return 2 - 2 * (x0 * x1).sum(dim=-1).mean()
| 159 | 19 | 47 | py |
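The `2 - 2·cos` form above is exactly the squared Euclidean distance between unit vectors: for normalised a and b, ||a − b||² = 2 − 2 a·b. A quick numerical check of that identity:

```python
import torch
import torch.nn.functional as F

a, b = F.normalize(torch.randn(4, 8)), F.normalize(torch.randn(4, 8))
mse = (a - b).pow(2).sum(dim=-1).mean()       # mean squared distance
loss = 2 - 2 * (a * b).sum(dim=-1).mean()     # the norm_mse_loss form
assert torch.allclose(mse, loss, atol=1e-6)
```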
self-supervised | self-supervised-master/methods/w_mse.py | import torch
import torch.nn.functional as F
from .whitening import Whitening2d
from .base import BaseMethod
from .norm_mse import norm_mse_loss
class WMSE(BaseMethod):
""" implements W-MSE loss """
def __init__(self, cfg):
""" init whitening transform """
super().__init__(cfg)
self.w... | 1,332 | 35.027027 | 87 | py |
self-supervised | self-supervised-master/methods/whitening.py | import torch
import torch.nn as nn
from torch.nn.functional import conv2d
class Whitening2d(nn.Module):
def __init__(self, num_features, momentum=0.01, track_running_stats=True, eps=0):
super(Whitening2d, self).__init__()
self.num_features = num_features
self.momentum = momentum
se... | 2,285 | 33.119403 | 85 | py |
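`Whitening2d`'s body is truncated. The core operation in W-MSE is to centre a batch and map it through an inverse square root of its covariance, so the whitened output has identity covariance. A Cholesky-based sketch of that idea (the eps handling and shapes are assumptions):

```python
import torch

def whiten(x: torch.Tensor, eps: float = 1e-5) -> torch.Tensor:
    x = x - x.mean(dim=0)                            # centre each feature
    cov = x.t() @ x / (x.shape[0] - 1)
    cov = cov + eps * torch.eye(x.shape[1])          # numerical stabiliser
    L = torch.linalg.cholesky(cov)                   # cov = L @ L.T
    # solve L @ y = x.T; the whitened batch y.T then has identity covariance
    return torch.linalg.solve_triangular(L, x.t(), upper=False).t()
```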
self-supervised | self-supervised-master/methods/byol.py | from itertools import chain
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from model import get_model, get_head
from .base import BaseMethod
from .norm_mse import norm_mse_loss
class BYOL(BaseMethod):
""" implements BYOL loss https://arxiv.org/abs/2006.07733 """
def __init__(... | 2,005 | 36.148148 | 80 | py |
self-supervised | self-supervised-master/methods/contrastive.py | from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from .base import BaseMethod
def contrastive_loss(x0, x1, tau, norm):
# https://github.com/google-research/simclr/blob/master/objective.py
bsize = x0.shape[0]
target = torch.arange(bsize).cuda()
eye_mask =... | 1,585 | 32.744681 | 75 | py |
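The loss body is truncated after the target/mask setup. A simplified InfoNCE sketch of the same idea, using only cross-view similarities (the full SimCLR objective referenced in the comment also includes intra-view negatives masked out by `eye_mask`):

```python
import torch
import torch.nn.functional as F

def info_nce(x0: torch.Tensor, x1: torch.Tensor, tau: float = 0.5) -> torch.Tensor:
    x0, x1 = F.normalize(x0), F.normalize(x1)
    logits = x0 @ x1.t() / tau                  # (B, B) cosine similarities
    target = torch.arange(x0.shape[0], device=x0.device)
    # row i should be most similar to column i, its own augmented pair
    return F.cross_entropy(logits, target)
```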
self-supervised | self-supervised-master/eval/knn.py | import torch
def eval_knn(x_train, y_train, x_test, y_test, k=5):
""" k-nearest neighbors classifier accuracy """
d = torch.cdist(x_test, x_train)
topk = torch.topk(d, k=k, dim=1, largest=False)
labels = y_train[topk.indices]
pred = torch.empty_like(y_test)
for i in range(len(labels)):
... | 501 | 28.529412 | 54 | py |
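The prediction loop is cut off mid-body. A hedged completion that takes a majority vote over the k neighbour labels via `torch.Tensor.mode` (the original loop may break ties differently):

```python
import torch

def eval_knn_sketch(x_train, y_train, x_test, y_test, k=5):
    d = torch.cdist(x_test, x_train)
    topk = torch.topk(d, k=k, dim=1, largest=False)
    labels = y_train[topk.indices]              # (n_test, k) neighbour labels
    pred = labels.mode(dim=1).values            # majority label per test point
    return (pred == y_test).float().mean().item()
```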
self-supervised | self-supervised-master/eval/sgd.py | import torch
import torch.nn as nn
import torch.optim as optim
def eval_sgd(x_train, y_train, x_test, y_test, topk=[1, 5], epoch=500):
""" linear classifier accuracy (sgd) """
lr_start, lr_end = 1e-2, 1e-6
gamma = (lr_end / lr_start) ** (1 / epoch)
output_size = x_train.shape[1]
num_class = y_trai... | 1,168 | 30.594595 | 84 | py |
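Filling in the schedule shown above: `gamma` is chosen so that multiplying the learning rate by it once per epoch decays it from `lr_start` to `lr_end` over the run. A minimal full-batch sketch of such a probe (the original presumably iterates in mini-batches and reports top-k accuracy):

```python
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

def linear_probe(x_train, y_train, epochs=500):
    lr_start, lr_end = 1e-2, 1e-6
    gamma = (lr_end / lr_start) ** (1 / epochs)   # per-epoch decay factor
    clf = nn.Linear(x_train.shape[1], int(y_train.max()) + 1)
    opt = optim.SGD(clf.parameters(), lr=lr_start)
    sched = optim.lr_scheduler.ExponentialLR(opt, gamma=gamma)
    for _ in range(epochs):
        opt.zero_grad()
        F.cross_entropy(clf(x_train), y_train).backward()
        opt.step()
        sched.step()
    return clf
```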
self-supervised | self-supervised-master/eval/lbfgs.py | import torch
from sklearn.linear_model import LogisticRegression
def eval_lbfgs(x_train, y_train, x_test, y_test):
""" linear classifier accuracy (lbfgs method) """
clf = LogisticRegression(
random_state=1337, solver="lbfgs", max_iter=1000, n_jobs=-1
)
clf.fit(x_train, y_train)
pred = clf.... | 393 | 29.307692 | 67 | py |
self-supervised | self-supervised-master/eval/get_data.py | import torch
def get_data(model, loader, output_size, device):
""" encodes whole dataset into embeddings """
xs = torch.empty(
len(loader), loader.batch_size, output_size, dtype=torch.float32, device=device
)
ys = torch.empty(len(loader), loader.batch_size, dtype=torch.long, device=device)
... | 558 | 30.055556 | 87 | py |
self-supervised | self-supervised-master/datasets/cifar100.py | from torchvision.datasets import CIFAR100 as C100
import torchvision.transforms as T
from .transforms import MultiSample, aug_transform
from .base import BaseDataset
def base_transform():
return T.Compose(
[T.ToTensor(), T.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761))]
)
class CIFAR1... | 816 | 29.259259 | 87 | py |
self-supervised | self-supervised-master/datasets/base.py | from abc import ABCMeta, abstractmethod
from functools import lru_cache
from torch.utils.data import DataLoader
class BaseDataset(metaclass=ABCMeta):
"""
base class for datasets, it includes 3 types:
- for self-supervised training,
- for classifier training for evaluation,
... | 1,689 | 23.852941 | 76 | py |
self-supervised | self-supervised-master/datasets/stl10.py | from torchvision.datasets import STL10 as S10
import torchvision.transforms as T
from .transforms import MultiSample, aug_transform
from .base import BaseDataset
def base_transform():
return T.Compose(
[T.ToTensor(), T.Normalize((0.43, 0.42, 0.39), (0.27, 0.26, 0.27))]
)
def test_transform():
re... | 938 | 27.454545 | 87 | py |
self-supervised | self-supervised-master/datasets/cifar10.py | from torchvision.datasets import CIFAR10 as C10
import torchvision.transforms as T
from .transforms import MultiSample, aug_transform
from .base import BaseDataset
def base_transform():
return T.Compose(
[T.ToTensor(), T.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))]
)
class CIFAR10(... | 809 | 29 | 87 | py |
self-supervised | self-supervised-master/datasets/tiny_in.py | from torchvision.datasets import ImageFolder
import torchvision.transforms as T
from .transforms import MultiSample, aug_transform
from .base import BaseDataset
def base_transform():
return T.Compose(
[T.ToTensor(), T.Normalize((0.480, 0.448, 0.398), (0.277, 0.269, 0.282))]
)
class TinyImageNet(Base... | 812 | 29.111111 | 87 | py |
self-supervised | self-supervised-master/datasets/imagenet.py | import random
from torchvision.datasets import ImageFolder
import torchvision.transforms as T
from PIL import ImageFilter
from .transforms import MultiSample, aug_transform
from .base import BaseDataset
class RandomBlur:
def __init__(self, r0, r1):
self.r0, self.r1 = r0, r1
def __call__(self, image):... | 1,225 | 28.190476 | 82 | py |
self-supervised | self-supervised-master/datasets/transforms.py | import torchvision.transforms as T
def aug_transform(crop, base_transform, cfg, extra_t=[]):
""" augmentation transform generated from config """
return T.Compose(
[
T.RandomApply(
[T.ColorJitter(cfg.cj0, cfg.cj1, cfg.cj2, cfg.cj3)], p=cfg.cj_p
),
T.... | 929 | 26.352941 | 79 | py |
self-supervised | self-supervised-master/tf2/whitening.py | import tensorflow.compat.v2 as tf
from absl import flags
FLAGS = flags.FLAGS
class Whitening1D(tf.keras.layers.Layer):
def __init__(self, eps=0, **kwargs):
super(Whitening1D, self).__init__(**kwargs)
self.eps = eps
def call(self, x):
bs, c = x.shape
x_t = tf.transpose(x, (1, ... | 1,496 | 31.543478 | 84 | py |
CoCLR | CoCLR-main/main_nce.py | import os
import sys
import argparse
import time, re
import builtins
import numpy as np
import random
import pickle
import socket
import math
from tqdm import tqdm
from backbone.select_backbone import select_backbone
import torch
import torch.nn as nn
import torch.optim as optim
import torch.multiprocessing as ... | 19,971 | 42.32321 | 134 | py |
CoCLR | CoCLR-main/main_coclr.py | import os
import sys
import argparse
import time, re
import builtins
import numpy as np
import random
import pickle
import socket
import math
from tqdm import tqdm
from backbone.select_backbone import select_backbone
import torch
import torch.nn as nn
import torch.optim as optim
import torch.multiprocessing as ... | 23,390 | 41.998162 | 144 | py |
CoCLR | CoCLR-main/backbone/s3dg.py | # modified from https://raw.githubusercontent.com/qijiezhao/s3d.pytorch/master/S3DG_Pytorch.py
import torch.nn as nn
import torch
## pytorch default: torch.nn.BatchNorm3d(num_features, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
## tensorflow s3d code: torch.nn.BatchNorm3d(num_features, eps=1e-3, ... | 8,784 | 38.572072 | 123 | py |
CoCLR | CoCLR-main/backbone/resnet_2d3d.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import math
__all__ = [
'ResNet2d3d', 'r2d3d50', 'r3d50'
]
def conv3x3x3(in_planes, out_planes, stride=1, bias=False):
# 3x3x3 convolution with padding
return nn.Conv3d(
in_planes,
out_pl... | 7,218 | 30.801762 | 121 | py |
CoCLR | CoCLR-main/dataset/lmdb_dataset.py | import os
import sys
import glob
import msgpack
import lmdb
from io import BytesIO
import torch
from PIL import Image
import pandas as pd
from tqdm import tqdm
import random
import numpy as np
import math
import csv
import json
# naming convention:
# {}_2CLIP is for pretraining
# without 2CLIP is for action class... | 37,990 | 43.642773 | 137 | py |
CoCLR | CoCLR-main/eval/main_classifier.py | import os
import sys
sys.path.append('../')
import argparse
import time
import re
import numpy as np
import random
import pickle
from tqdm import tqdm
from PIL import Image
import json
from tensorboardX import SummaryWriter
import matplotlib.pyplot as plt
plt.switch_backend('agg')
import torch
import torch.nn as n... | 34,801 | 41.493284 | 143 | py |
CoCLR | CoCLR-main/eval/merge_2stream_prob.py | import os
import sys
sys.path.append('../../')
import argparse
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils import data
from utils.utils import AverageMeter, save_checkpoint, \
write_log, calc_topk_accuracy, Logger, ProgressMeter
import pickle
i... | 7,725 | 35.966507 | 106 | py |
CoCLR | CoCLR-main/eval/feature_linear_probe.py | import os
import sys
sys.path.append('../')
import argparse
import pickle
import numpy as np
from tqdm import tqdm
import math
import json
import matplotlib.pyplot as plt
plt.switch_backend('agg')
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.util... | 8,266 | 34.943478 | 122 | py |