The table below lists one record per source file. Each record has seven fields: `repo` (string, length 1 to 99), `file` (string, length 13 to 215), `code` (string, length 12 to 59.2M; the start of each file is shown, truncated), `file_length` (int64, 12 to 59.2M), `avg_line_length` (float64, 3.82 to 1.48M), `max_line_length` (int64, 12 to 2.51M), and `extension_type` (string, a single class, `py`).
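To make the schema concrete, here is a minimal, purely illustrative sketch that builds two records with these fields (values copied from the table below) and filters them; the dataset itself is not loaded here.

```python
# Illustrative records following the schema above; the "code" values are the
# truncated previews shown in the table, not the full file contents.
records = [
    {
        "repo": "signatory",
        "file": "signatory-master/benchmark/functions/signatory_signature_forward.py",
        "code": "# Copyright 2019 Patrick Kidger. All Rights Reserved. ...",
        "file_length": 846,
        "avg_line_length": 32.88,
        "max_line_length": 75,
        "extension_type": "py",
    },
    {
        "repo": "ASK",
        "file": "ASK-main/pgd.py",
        "code": "import torch ...",
        "file_length": 2261,
        "avg_line_length": 37.338983,
        "max_line_length": 100,
        "extension_type": "py",
    },
]

# Keep only Python files shorter than 1,000 characters.
short_py = [r for r in records if r["extension_type"] == "py" and r["file_length"] < 1000]
print([r["file"] for r in short_py])
# ['signatory-master/benchmark/functions/signatory_signature_forward.py']
```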
| repo | file | code | file_length | avg_line_length | max_line_length | extension_type |
|---|---|---|---|---|---|---|
| signatory | signatory-master/benchmark/functions/signatory_signature_forward.py | # Copyright 2019 Patrick Kidger. All Rights Reserved. ... | 846 | 32.88 | 75 | py |
| signatory | signatory-master/benchmark/functions/signatory_signature_backward_gpu.py | # Copyright 2019 Patrick Kidger. All Rights Reserved. ... | 1,128 | 36.633333 | 89 | py |
| signatory | signatory-master/benchmark/functions/signatory_logsignature_forward_gpu.py | # Copyright 2019 Patrick Kidger. All Rights Reserved. ... | 1,023 | 34.310345 | 75 | py |
| signatory | signatory-master/benchmark/functions/signatory_logsignature_forward.py | # Copyright 2019 Patrick Kidger. All Rights Reserved. ... | 959 | 34.555556 | 75 | py |
| signatory | signatory-master/benchmark/functions/signatory_logsignature_backward_no_parallel.py | # Copyright 2019 Patrick Kidger. All Rights Reserved. ... | 1,111 | 34.870968 | 82 | py |
| signatory | signatory-master/benchmark/functions/esig_logsignature_forward.py | # Copyright 2019 Patrick Kidger. All Rights Reserved. ... | 1,079 | 32.75 | 75 | py |
| signatory | signatory-master/benchmark/functions/signatory_logsignature_forward_no_parallel.py | # Copyright 2019 Patrick Kidger. All Rights Reserved. ... | 989 | 33.137931 | 75 | py |
| signatory | signatory-master/benchmark/functions/esig_signature_forward.py | # Copyright 2019 Patrick Kidger. All Rights Reserved. ... | 1,073 | 32.5625 | 75 | py |
| signatory | signatory-master/benchmark/functions/iisignature_logsignature_forward.py | # Copyright 2019 Patrick Kidger. All Rights Reserved. ... | 926 | 34.653846 | 75 | py |
| signatory | signatory-master/benchmark/functions/signatory_signature_backward.py | # Copyright 2019 Patrick Kidger. All Rights Reserved. ... | 1,069 | 35.896552 | 79 | py |
| signatory | signatory-master/benchmark/functions/signatory_logsignature_backward_gpu.py | # Copyright 2019 Patrick Kidger. All Rights Reserved. ... | 1,140 | 37.033333 | 89 | py |
| signatory | signatory-master/benchmark/functions/iisignature_logsignature_backward.py | # Copyright 2019 Patrick Kidger. All Rights Reserved. ... | 1,061 | 36.928571 | 75 | py |
| signatory | signatory-master/benchmark/functions/signatory_logsignature_backward.py | # Copyright 2019 Patrick Kidger. All Rights Reserved. ... | 1,081 | 36.310345 | 82 | py |
| signatory | signatory-master/benchmark/functions/iisignature_signature_backward.py | # Copyright 2019 Patrick Kidger. All Rights Reserved. ... | 984 | 35.481481 | 75 | py |
| signatory | signatory-master/benchmark/functions/signatory_signature_forward_gpu.py | # Copyright 2019 Patrick Kidger. All Rights Reserved. ... | 910 | 32.740741 | 75 | py |
| satd-in-industry | satd-in-industry-main/_satd_detector/satd_detector.py | import argparse ... | 9,986 | 35.316364 | 117 | py |
| ASK | ASK-main/pgd.py | import torch ... | 2,261 | 37.338983 | 100 | py |
| ASK | ASK-main/ask_attack.py | from sklearn.neighbors import NearestNeighbors ... | 9,710 | 36.206897 | 109 | py |
| ASK | ASK-main/data_utils.py | import os ... | 4,433 | 32.338346 | 109 | py |
| ASK | ASK-main/ask_attack_fastknn.py | from sklearn.neighbors import NearestNeighbors ... | 10,297 | 37 | 112 | py |
| ASK | ASK-main/dknn.py | import torch ... | 4,387 | 33.015504 | 96 | py |
| ASK | ASK-main/ask_train.py | import torch ... | 16,031 | 46.856716 | 120 | py |
| ASK | ASK-main/ask_loss.py | import torch ... | 4,840 | 42.223214 | 124 | py |
| ASK | ASK-main/models/resnet.py | """ Reference: [1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun. Deep Residual Learning for Image Recognition. arXiv:1512.03385 """ ... | 4,413 | 31.218978 | 83 | py |
| ASK | ASK-main/models/vgg.py | import torch.nn as nn ... | 1,661 | 31.588235 | 86 | py |
| document-level-FEVER | document-level-FEVER-main/src/main.py | from transformers import BigBirdForTokenClassification, BigBirdModel, BigBirdTokenizer, BigBirdConfig, AutoTokenizer ... | 9,488 | 39.207627 | 156 | py |
| document-level-FEVER | document-level-FEVER-main/src/sentence_selection_dataset.py | from transformers import BigBirdForTokenClassification, BigBirdModel, BigBirdTokenizer, BigBirdConfig ... | 4,807 | 31.486486 | 140 | py |
| document-level-FEVER | document-level-FEVER-main/src/sentence_selection_model.py | from transformers import BigBirdForTokenClassification, BigBirdModel, BigBirdTokenizer, BigBirdConfig, RobertaModel ... | 3,196 | 29.160377 | 139 | py |
| GRACR | GRACR-master/src/main.py | #!/usr/bin/env python3 ... | 3,062 | 31.585106 | 117 | py |
| GRACR | GRACR-master/src/models/basemodel.py | import torch ... | 7,231 | 42.566265 | 122 | py |
| GRACR | GRACR-master/src/models/gracr.py | import numpy ... | 23,165 | 48.184713 | 142 | py |
| GRACR | GRACR-master/src/nnet/transformers_word_handle.py | import torch ... | 6,438 | 43.10274 | 113 | py |
| GRACR | GRACR-master/src/nnet/modules.py | #!/usr/bin/env python3 ... | 10,807 | 35.147157 | 122 | py |
| GRACR | GRACR-master/src/nnet/rgcn.py | import torch ... | 4,243 | 39.037736 | 108 | py |
| GRACR | GRACR-master/src/nnet/attention.py | #!/usr/bin/env python3 ... | 15,399 | 36.652812 | 137 | py |
| GRACR | GRACR-master/src/nnet/trainer.py | #!/usr/bin/env python3 ... | 27,300 | 42.403816 | 155 | py |
| GRACR | GRACR-master/src/utils/tensor_utils.py | import torch ... | 2,622 | 28.47191 | 96 | py |
| GRACR | GRACR-master/src/utils/adj_utils.py | import numpy as np ... | 3,769 | 32.660714 | 104 | py |
| GRACR | GRACR-master/src/utils/utils.py | #!/usr/bin/env python3 ... | 11,146 | 35.667763 | 172 | py |
| GRACR | GRACR-master/data_processing/convert2result.py | import json ... | 1,308 | 28.088889 | 107 | py |
| graph-neural-networks | graph-neural-networks-master/examples/epidemicGRNN.py | # 2021/03/04~ ... | 35,657 | 37.885496 | 85 | py |
| graph-neural-networks | graph-neural-networks-master/examples/flockingGNN.py | # 2020/01/01~ ... | 52,085 | 40.370929 | 81 | py |
| graph-neural-networks | graph-neural-networks-master/examples/sourceLocGNN.py | # 2018/12/03~ ... | 49,414 | 39.305873 | 97 | py |
| graph-neural-networks | graph-neural-networks-master/examples/authorshipGNN.py | # 2019/04/08~ ... | 39,005 | 39.006154 | 86 | py |
| graph-neural-networks | graph-neural-networks-master/examples/movieGNN.py | # 2019/04/10~ ... | 42,026 | 39.217225 | 86 | py |
| graph-neural-networks | graph-neural-networks-master/alegnn/modules/architecturesTime.py | # 2019/12/31~ ... | 36,146 | 45.164751 | 80 | py |
| graph-neural-networks | graph-neural-networks-master/alegnn/modules/loss.py | # 2021/03/04~ ... | 4,632 | 36.064 | 80 | py |
| graph-neural-networks | graph-neural-networks-master/alegnn/modules/training.py | # 2020/02/25~ ... | 76,058 | 43.819682 | 85 | py |
| graph-neural-networks | graph-neural-networks-master/alegnn/modules/model.py | # 2018/10/02~ ... | 5,959 | 35.341463 | 80 | py |
| graph-neural-networks | graph-neural-networks-master/alegnn/modules/architectures.py | # 2021/03/04~ ... | 245,201 | 48.187964 | 88 | py |
| graph-neural-networks | graph-neural-networks-master/alegnn/modules/evaluation.py | # 2020/02/25~ ... | 9,535 | 28.073171 | 80 | py |
| graph-neural-networks | graph-neural-networks-master/alegnn/utils/graphML.py | # 2021/03/04~ ... | 175,841 | 40.777619 | 93 | py |
| graph-neural-networks | graph-neural-networks-master/alegnn/utils/visualTools.py | # 2019/01/21~2018/07/12 ... | 2,521 | 37.212121 | 182 | py |
| graph-neural-networks | graph-neural-networks-master/alegnn/utils/miscTools.py | # 2018/10/15~ ... | 4,291 | 37.321429 | 80 | py |
| graph-neural-networks | graph-neural-networks-master/alegnn/utils/dataTools.py | # 2021/03/04~ ... | 221,656 | 46.657923 | 89 | py |
| TabularNCD | TabularNCD-main/Baseline.py | from sklearn.metrics import adjusted_rand_score, normalized_mutual_info_score ... | 5,016 | 49.676768 | 255 | py |
| TabularNCD | TabularNCD-main/Clustering.py | from sklearn.cluster import SpectralClustering, KMeans ... | 6,826 | 48.115108 | 372 | py |
| TabularNCD | TabularNCD-main/src/ncl_memory_module.py | import torch ... | 2,780 | 52.480769 | 148 | py |
| TabularNCD | TabularNCD-main/src/loss_functions.py | import torch.nn.functional as F ... | 2,369 | 56.804878 | 124 | py |
| TabularNCD | TabularNCD-main/src/TabularNCDModel.py | import torch.nn as nn ... | 8,910 | 65.007407 | 226 | py |
| TabularNCD | TabularNCD-main/src/utils.py | from sklearn.metrics import accuracy_score, balanced_accuracy_score, adjusted_rand_score, normalized_mutual_info_score ... | 20,434 | 44.411111 | 182 | py |
| TabularNCD | TabularNCD-main/src/import_utils.py | from keras.datasets import mnist ... | 19,304 | 56.972973 | 264 | py |
| TabularNCD | TabularNCD-main/src/training_procedures.py | from itertools import combinations ... | 25,018 | 56.7806 | 202 | py |
| TabularNCD | TabularNCD-main/src/BaselineModel.py | import torch.nn as nn ... | 1,182 | 33.794118 | 114 | py |
| TabularNCD | TabularNCD-main/src/transforms.py | import torch.nn.functional as F ... | 8,432 | 50.109091 | 162 | py |
| structured-nets | structured-nets-master/scripts/visualizations/plot_speed.py | import os ... | 3,064 | 28.190476 | 109 | py |
| structured-nets | structured-nets-master/pytorch/main.py | import sys, os, datetime, subprocess ... | 7,008 | 43.360759 | 121 | py |
| structured-nets | structured-nets-master/pytorch/utils.py | import torch ... | 917 | 24.5 | 73 | py |
| structured-nets | structured-nets-master/pytorch/dataset.py | import numpy as np ... | 8,210 | 38.287081 | 141 | py |
| structured-nets | structured-nets-master/pytorch/examples/word_language_model/main.py | """ Modified from pytorch/examples/word_language_model to demonstrate 'StructuredLinear' usage. """ ... | 10,013 | 37.664093 | 142 | py |
| structured-nets | structured-nets-master/pytorch/examples/word_language_model/lstm.py | """ Some parts modified from https://github.com/jihunchoi/recurrent-batch-normalization-pytorch/blob/master/bnlstm.py """ ... | 3,637 | 35.747475 | 113 | py |
| structured-nets | structured-nets-master/pytorch/examples/word_language_model/generate.py | """ Modified from pytorch/examples/word_language_model to demonstrate 'StructuredLinear' usage. """ ... | 2,695 | 35.931507 | 91 | py |
| structured-nets | structured-nets-master/pytorch/examples/word_language_model/model.py | """ Modified from pytorch/examples/word_language_model to demonstrate 'StructuredLinear' usage. """ ... | 3,045 | 40.726027 | 110 | py |
| structured-nets | structured-nets-master/pytorch/examples/word_language_model/data.py | """ Modified from pytorch/examples/word_language_model to demonstrate 'StructuredLinear' usage. """ ... | 1,540 | 28.075472 | 91 | py |
| structured-nets | structured-nets-master/pytorch/examples/vae/main.py | """ Modified from pytorch/examples/vae to demonstrate 'StructuredLinear' usage. """ ... | 6,207 | 36.173653 | 116 | py |
| structured-nets | structured-nets-master/pytorch/structure/layer.py | import numpy as np ... | 9,664 | 32.099315 | 184 | py |
| structured-nets | structured-nets-master/pytorch/structure/hadamard.py | import numpy as np ... | 3,354 | 33.947917 | 114 | py |
| structured-nets | structured-nets-master/pytorch/structure/fastfood.py | from .hadamard import hadamard_transform ... | 1,210 | 27.833333 | 71 | py |
| structured-nets | structured-nets-master/pytorch/structure/circulant.py | import torch ... | 920 | 31.892857 | 105 | py |
| structured-nets | structured-nets-master/pytorch/structure/LDR.py | import torch ... | 2,800 | 40.191176 | 122 | py |
| structured-nets | structured-nets-master/pytorch/structure/toeplitz.py | '''Functions to multiply by a Toeplitz-like matrix.''' ... | 9,657 | 37.023622 | 131 | py |
| structured-nets | structured-nets-master/pytorch/structure/krylov.py | '''Functions to multiply by an LDR matrix with subdiagonal and tridiagonal operator matrices. ... | 51,567 | 45.922657 | 186 | py |
| structured-nets | structured-nets-master/pytorch/structure/complex_utils.py | ''' Utility functions for handling complex tensors: conjugate and complex_mult. ... | 647 | 31.4 | 79 | py |
| structured-nets | structured-nets-master/pytorch/structure/hadamard_cuda/setup.py | import torch.cuda ... | 622 | 27.318182 | 81 | py |
| structured-nets | structured-nets-master/pytorch/structure/diag_mult_cuda/setup.py | import torch.cuda ... | 626 | 27.5 | 81 | py |
| structured-nets | structured-nets-master/pytorch/structure/scratch/krylovfast.py | import numpy as np ... | 16,442 | 42.731383 | 135 | py |
| structured-nets | structured-nets-master/pytorch/models/nets.py | from inspect import signature ... | 12,657 | 32.223097 | 140 | py |
| structured-nets | structured-nets-master/pytorch/learning/prune.py | import numpy as np ... | 1,302 | 34.216216 | 162 | py |
| structured-nets | structured-nets-master/pytorch/learning/train.py | import numpy as np ... | 5,790 | 36.36129 | 107 | py |
| structured-nets | structured-nets-master/pytorch/old/misc/charRNN/char_rnn_classification_tutorial.py | # -*- coding: utf-8 -*- ... | 16,247 | 29.370093 | 122 | py |
| structured-nets | structured-nets-master/pytorch/old/misc/circtest/utils.py | import copy ... | 7,376 | 35.519802 | 111 | py |
| structured-nets | structured-nets-master/pytorch/old/misc/circtest/circulant.py | import numpy as np ... | 4,138 | 31.085271 | 111 | py |
| structured-nets | structured-nets-master/pytorch/old/misc/attention/optimize_nmt.py | import numpy as np ... | 3,300 | 38.297619 | 143 | py |
| structured-nets | structured-nets-master/pytorch/old/misc/attention/optimize_iwslt.py | import numpy as np ... | 5,962 | 36.503145 | 143 | py |
| structured-nets | structured-nets-master/pytorch/old/misc/attention/attention.py | """ http://nlp.seas.harvard.edu/2018/04/03/attention.html """ ... | 9,710 | 36.35 | 78 | py |
| structured-nets | structured-nets-master/pytorch/old/misc/attention/train.py | import torch ... | 10,532 | 36.351064 | 91 | py |
| structured-nets | structured-nets-master/pytorch/old/utils/torch_reconstruction.py | import torch ... | 1,984 | 24.779221 | 93 | py |
| structured-nets | structured-nets-master/pytorch/old/utils/torch_utils.py | import torch ... | 4,489 | 32.259259 | 87 | py |
| structured-nets | structured-nets-master/pytorch/old/utils/torch_krylov.py | import torch ... | 3,596 | 36.082474 | 98 | py |
| structured-nets | structured-nets-master/tensorflow/compare.py | """ Compare methods and hyperparameter settings sequentially. """ ... | 7,643 | 43.701754 | 282 | py |