| repo (string, 1–99 chars) | file (string, 13–215 chars) | code (string, 12–59.2M chars) | file_length (int64, 12–59.2M) | avg_line_length (float64, 3.82–1.48M) | max_line_length (int64, 12–2.51M) | extension_type (string, 1 class) |
|---|---|---|---|---|---|---|
fuzzyJoiner | fuzzyJoiner-master/old/TripletLossFacenetLSTM-8.31.18.py | from random import shuffle
import numpy as np
import pandas
import tensorflow as tf
import random as random
import json
from keras import backend as K
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Dense, Input, Flatten, Dropout, Lambda... | 22,999 | 35.624204 | 163 | py |
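The fuzzyJoiner scripts above train name embeddings with a FaceNet-style triplet objective. The margin comparison at the heart of that setup looks like the minimal sketch below, written with the Keras backend the script imports; the function name and the margin value are illustrative assumptions, not the repo's exact code.

```python
from keras import backend as K

def triplet_loss(anchor, positive, negative, margin=1.0):
    # Pull the anchor toward the positive and past the negative by `margin`.
    pos_dist = K.sum(K.square(anchor - positive), axis=-1)
    neg_dist = K.sum(K.square(anchor - negative), axis=-1)
    return K.mean(K.maximum(pos_dist - neg_dist + margin, 0.0))
```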
fuzzyJoiner | fuzzyJoiner-master/old/seq2seqTriplet.py | '''Sequence to sequence example in Keras (character-level).
This script demonstrates how to implement a basic character-level
sequence-to-sequence model. We apply it to translating
short English sentences into short French sentences,
character-by-character. Note that it is fairly unusual to
do character-level machine t... | 9,670 | 39.634454 | 106 | py |
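The docstring above describes the canonical character-level encoder-decoder: an LSTM encoder compresses the source string into its final states, and a decoder LSTM conditioned on those states emits one character at a time. A minimal sketch of that wiring with the standard Keras API; the token counts and latent dimension are assumed placeholders.

```python
# Illustrative only: vocabulary sizes and latent dimension are assumed.
from keras.layers import Input, LSTM, Dense
from keras.models import Model

num_encoder_tokens, num_decoder_tokens, latent_dim = 71, 94, 256  # placeholders

# Encoder: keep only the final hidden/cell states as the "thought vector".
encoder_inputs = Input(shape=(None, num_encoder_tokens))
_, state_h, state_c = LSTM(latent_dim, return_state=True)(encoder_inputs)

# Decoder: start from the encoder states, predict one character per step.
decoder_inputs = Input(shape=(None, num_decoder_tokens))
decoder_seq = LSTM(latent_dim, return_sequences=True)(
    decoder_inputs, initial_state=[state_h, state_c])
decoder_outputs = Dense(num_decoder_tokens, activation="softmax")(decoder_seq)

model = Model([encoder_inputs, decoder_inputs], decoder_outputs)
model.compile(optimizer="rmsprop", loss="categorical_crossentropy")
```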
fuzzyJoiner | fuzzyJoiner-master/old/TripletLossFacenetLSTM_hpo.py | import numpy as np
import pandas
import tensorflow as tf
import random as random
import json
from keras import backend as K
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Dense, Input, Flatten, Dropout, Lambda, GRU, Activation
from keras... | 19,474 | 35.88447 | 163 | py |
fuzzyJoiner | fuzzyJoiner-master/old/TripletLossFacenetLSTM-schroffloss.py | import numpy as np
import tensorflow as tf
import random as random
# import cntk as C
# """
# The below is necessary in Python 3.2.3 onwards to
# have reproducible behavior for certain hash-based operations.
# See these references for further details:
# https://docs.python.org/3.4/using/cmdline.html#envvar-PYTHONHASHSE... | 21,846 | 37.463028 | 167 | py |
fuzzyJoiner | fuzzyJoiner-master/old/TripletLossFacenetLSTM-8.29.18.py | import numpy as np
import pandas
import tensorflow as tf
import random as random
import json
from keras import backend as K
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Dense, Input, Flatten, Dropout, Lambda, GRU, Activation
from keras... | 21,235 | 36.061082 | 163 | py |
smt | smt-master/doc/conf.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# SMT documentation build configuration file, created by
# sphinx-quickstart on Sun Aug 6 19:36:14 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autoge... | 5,197 | 28.873563 | 79 | py |
pyzor | pyzor-master/docs/conf.py | # -*- coding: utf-8 -*-
#
# Pyzor documentation build configuration file, created by
# sphinx-quickstart on Sat Jun 7 15:20:07 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All... | 8,310 | 30.244361 | 79 | py |
MINDER | MINDER-main/scripts/build_fm_index.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import csv
import logging
import multiprocessing
import re
import ftfy
import torch
import tqdm
import pic... | 7,216 | 29.974249 | 136 | py |
MINDER | MINDER-main/seal/keys.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import math
from collections import defaultdict
from heapq import heappop, heappush
from itertools import chain, islice, pr... | 18,977 | 34.079482 | 138 | py |
MINDER | MINDER-main/seal/utils.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torch import nn
def _remove_ignore_keys_(state_dict):
ignore_keys = [
"encoder.version",
... | 2,021 | 35.107143 | 102 | py |
MINDER | MINDER-main/seal/beam_search.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from collections import UserDict
from typing import *
import warnings
from more_itertools import chunked
import torch
from... | 31,228 | 40.090789 | 183 | py |
SRU_for_GCI | SRU_for_GCI-master/main.py | #!/usr/bin/env python
# coding: utf-8
# Import header files
import math
import argparse
import torch
from torch import autograd
import torch.nn as nn
import torch.nn.functional as F
import matplotlib
import sys
import numpy as np
import pylab
from matplotlib import pyplot as plt
import time
import sys
from models.sru... | 12,432 | 33.72905 | 186 | py |
SRU_for_GCI | SRU_for_GCI-master/models/esru_2LF.py | import time
import torch
from torch import autograd
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import math
from copy import deepcopy
# Statistical Recurrent Unit class (based on paper by Junier B. Oliva, arXiv:1703.00381v1)
class eSRU_2LF(torch.nn.Module):
def __init__(self,
... | 14,315 | 47.040268 | 216 | py |
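The Statistical Recurrent Unit cited in the comments (Oliva et al., arXiv:1703.00381) replaces gated recurrence with exponential moving averages of learned summary statistics. Below is a single-scale toy sketch of that update in PyTorch; the paper and these repos keep averages at several decay scales, and `MiniSRU`, its sizes, and the single decay rate `alpha` are assumptions for illustration.

```python
import torch
import torch.nn as nn

class MiniSRU(nn.Module):
    """Toy single-scale SRU-style cell: phi(x, u) feeds a moving average u."""
    def __init__(self, input_size, stat_size, alpha=0.5):
        super().__init__()
        self.phi = nn.Sequential(nn.Linear(input_size + stat_size, stat_size),
                                 nn.ReLU())
        self.alpha = alpha

    def forward(self, x):  # x: (seq_len, batch, input_size)
        u = x.new_zeros(x.size(1), self.phi[0].out_features)
        outputs = []
        for x_t in x:  # recur over time steps
            stats = self.phi(torch.cat([x_t, u], dim=-1))
            u = (1 - self.alpha) * u + self.alpha * stats
            outputs.append(u)
        return torch.stack(outputs)  # (seq_len, batch, stat_size)
```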
SRU_for_GCI | SRU_for_GCI-master/models/esru_1LF.py | import time
import torch
from torch import autograd
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import math
from copy import deepcopy
# Statistical Recurrent Unit class (based on paper by Junier B. Oliva, arXiv:1703.00381v1)
class eSRU_1LF(torch.nn.Module):
def __init__(self,
... | 13,531 | 45.501718 | 216 | py |
SRU_for_GCI | SRU_for_GCI-master/models/sru.py | import time
import torch
from torch import autograd
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import math
from copy import deepcopy
# Statistical Recurrent Unit class (based on paper by Junier B. Oliva, arXiv:1703.00381v1)
class SRU(torch.nn.Module):
def __init__(self,
... | 15,047 | 45.018349 | 216 | py |
SRU_for_GCI | SRU_for_GCI-master/utils/utilFuncs.py | # Import header files
import math
import torch
from torch import autograd
import torch.nn as nn
import torch.nn.functional as F
import matplotlib
import sys
import numpy as np
import pylab
from matplotlib import pyplot as plt
import time
import sys
import csv
###########################################
# Python/numpy... | 11,267 | 28.730871 | 142 | py |
SRU_for_GCI | SRU_for_GCI-master/utils/lorenz96Checker.py | import numpy as np
import torch
from utilFuncs import calcPerfMetrics, calcAUROC, calcAUPR
# lorenz96 params
T = 1000
F = 40.0
model_name = 'lstm'
mu = 6.6 # F = 10, mu = 0.2| F = 40, mu = 4.0
n = 10
numDatasets = 5
max_iter = 500
verbose = 0
thresholdVec = np.arange(0, 1, 0.05)
#thresholdVec = np.arange(0, 0.1, ... | 1,909 | 32.508772 | 146 | py |
SRU_for_GCI | SRU_for_GCI-master/utils/perfChk.py | import math
import torch
import matplotlib
#import sys
import numpy as np
import pylab
from matplotlib import pyplot as plt
#import time
from utilFuncs import loadTrueNetwork, getCausalNodes, calcPerfMetrics, calcAUROC, calcAUPR
dataset = 'LORENZ'
#dataset = 'VAR'
#dataset = 'GENE'
if(dataset == 'LORENZ'):
dat... | 3,856 | 30.357724 | 160 | py |
Quantized-GBDT | Quantized-GBDT-master/experiments/generate_script.py | import os
import argparse
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument("data_path", type=str)
arg_parser.add_argument("--use-discretized-grad", action='store_true')
arg_parser.add_argument("--discretized-grad-renew", action='store_true')
arg_parser.add_argument("--stochastic-rounding", action='store_... | 7,377 | 46.294872 | 177 | py |
neuron-merging | neuron-merging-main/main.py | from __future__ import print_function
import warnings
warnings.simplefilter("ignore", UserWarning)
import argparse
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import os
import sys
import pickle
import copy
cwd = os.getcwd()
sys.path.append(cwd+'/.... | 14,964 | 38.485488 | 167 | py |
neuron-merging | neuron-merging-main/decompose.py | from __future__ import print_function
import argparse
import pickle
import numpy as np
from sklearn.utils.extmath import randomized_svd
from sklearn.metrics import pairwise_distances
from sklearn.metrics.pairwise import cosine_similarity
from scipy.spatial.distance import cosine
import torch
import torch.nn as nn
impo... | 19,186 | 35.616412 | 168 | py |
neuron-merging | neuron-merging-main/models/ResNet.py | import torch
import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
import torch.nn.functional as F
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False... | 7,173 | 26.381679 | 99 | py |
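For context, `conv3x3` is the building block of ResNet's basic residual block: two such convolutions wrapped around an identity shortcut. A hedged usage sketch, not the repo's block class:

```python
import torch
import torch.nn as nn

def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding (as defined above)."""
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                     padding=1, bias=False)

# Two conv3x3 layers plus an identity shortcut form the classic basic block.
x = torch.randn(1, 64, 32, 32)
block = nn.Sequential(conv3x3(64, 64), nn.BatchNorm2d(64), nn.ReLU(),
                      conv3x3(64, 64), nn.BatchNorm2d(64))
out = torch.relu(block(x) + x)  # the residual connection preserves the shape
print(out.shape)                # torch.Size([1, 64, 32, 32])
```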
neuron-merging | neuron-merging-main/models/LeNet_300_100.py | from __future__ import print_function
import torch
import torch.nn as nn
import os
class LeNet_300_100(nn.Module):
def __init__(self, bias_flag, cfg):
if cfg == None:
cfg = [300,100]
super(LeNet_300_100, self).__init__()
self.ip1 = nn.Linear(28*28, cfg[0], bias=bias_flag)
... | 749 | 29 | 60 | py |
neuron-merging | neuron-merging-main/models/VGG.py | from __future__ import print_function
import math
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
__all__ = ['VGG']
defaultcfg = {
11: [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512],
13: [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512,... | 3,154 | 26.920354 | 107 | py |
neuron-merging | neuron-merging-main/models/WideResNet.py | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
def __init__(self, in_planes, out_planes, stride, cfg, dropRate=0.0):
super(BasicBlock, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.relu1 = nn.ReLU(inplace=True)
... | 4,955 | 35.711111 | 119 | py |
LAP-PAL | LAP-PAL-master/continuous/main.py | import numpy as np
import torch
import gym
import argparse
import os
import time
import utils
import TD3
import LAP_TD3
import PAL_TD3
import PER_TD3
# Runs policy for X episodes and returns average reward
def eval_policy(policy, env, seed, eval_episodes=10):
eval_env = gym.make(env)
eval_env.seed(seed + 100)
av... | 5,222 | 33.361842 | 121 | py |
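`eval_policy` is cut off just after seeding the evaluation environment (offset by 100 so evaluation rollouts don't reuse training randomness). A typical completion of such a loop, modeled on the standard TD3 reference code rather than this repo's exact text, is sketched below; the `policy.select_action` interface is an assumption.

```python
import gym

def eval_policy(policy, env, seed, eval_episodes=10):
    # Everything after the seeding lines is an assumed completion.
    eval_env = gym.make(env)
    eval_env.seed(seed + 100)  # decorrelate evaluation from training rollouts
    avg_reward = 0.0
    for _ in range(eval_episodes):
        state, done = eval_env.reset(), False
        while not done:
            action = policy.select_action(state)  # assumed policy interface
            state, reward, done, _ = eval_env.step(action)
            avg_reward += reward
    return avg_reward / eval_episodes
```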
LAP-PAL | LAP-PAL-master/continuous/PAL_TD3.py | import copy
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class Actor(nn.Module):
def __init__(self, state_dim, action_dim, max_action):
super(Actor, self).__init__()
self.l1 = nn.Linear(state_dim, 25... | 5,164 | 26.768817 | 119 | py |
LAP-PAL | LAP-PAL-master/continuous/PER_TD3.py | import copy
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class Actor(nn.Module):
def __init__(self, state_dim, action_dim, max_action):
super(Actor, self).__init__()
self.l1 = nn.Linear(state_dim, 25... | 5,032 | 26.653846 | 94 | py |
LAP-PAL | LAP-PAL-master/continuous/utils.py | import numpy as np
import torch
class ReplayBuffer(object):
def __init__(self, state_dim, action_dim, max_size=int(1e6)):
self.max_size = max_size
self.ptr = 0
self.size = 0
self.state = np.zeros((max_size, state_dim))
self.action = np.zeros((max_size, action_dim))
self.next_state = np.zeros((max_size, ... | 4,293 | 28.410959 | 93 | py |
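The constructor above preallocates flat NumPy arrays and tracks a ring-buffer pointer. The `add`/`sample` pair that usually completes this pattern is sketched below; the method bodies are assumptions inferred from the visible fields, not the repo's exact code.

```python
import numpy as np
import torch

class ReplayBuffer:
    # Constructor mirrors the fields visible above; add/sample are assumed.
    def __init__(self, state_dim, action_dim, max_size=int(1e6)):
        self.max_size, self.ptr, self.size = max_size, 0, 0
        self.state = np.zeros((max_size, state_dim))
        self.action = np.zeros((max_size, action_dim))
        self.next_state = np.zeros((max_size, state_dim))
        self.reward = np.zeros((max_size, 1))

    def add(self, state, action, next_state, reward):
        # Ring buffer: overwrite the oldest entry once full.
        self.state[self.ptr], self.action[self.ptr] = state, action
        self.next_state[self.ptr], self.reward[self.ptr] = next_state, reward
        self.ptr = (self.ptr + 1) % self.max_size
        self.size = min(self.size + 1, self.max_size)

    def sample(self, batch_size):
        idx = np.random.randint(0, self.size, size=batch_size)
        return tuple(torch.FloatTensor(a[idx]) for a in
                     (self.state, self.action, self.next_state, self.reward))
```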
LAP-PAL | LAP-PAL-master/continuous/TD3.py | import copy
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class Actor(nn.Module):
def __init__(self, state_dim, action_dim, max_action):
super(Actor, self).__init__()
self.l1 = nn.Linear(state_dim, 25... | 4,551 | 26.756098 | 93 | py |
LAP-PAL | LAP-PAL-master/continuous/LAP_TD3.py | import copy
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class Actor(nn.Module):
def __init__(self, state_dim, action_dim, max_action):
super(Actor, self).__init__()
self.l1 = nn.Linear(state_dim, 25... | 5,118 | 26.67027 | 116 | py |
LAP-PAL | LAP-PAL-master/discrete/main.py | import argparse
import copy
import importlib
import json
import os
import numpy as np
import torch
import DDQN
import PER_DDQN
import LAP_DDQN
import PAL_DDQN
import utils
def main(env, replay_buffer, is_atari, state_dim, num_actions, args, parameters, device):
# Initialize and load policy
kwargs = {
"is_atari"... | 6,543 | 26.846809 | 116 | py |
LAP-PAL | LAP-PAL-master/discrete/utils.py | import cv2
import gym
import numpy as np
import torch
def ReplayBuffer(state_dim, prioritized, is_atari, atari_preprocessing, batch_size, buffer_size, device):
if is_atari:
return PrioritizedAtariBuffer(state_dim, atari_preprocessing, batch_size, buffer_size, device, prioritized)
else:
return PrioritizedStand... | 10,436 | 28.483051 | 109 | py |
LAP-PAL | LAP-PAL-master/discrete/PER_DDQN.py | import copy
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
# Used for Atari
class Conv_Q(nn.Module):
def __init__(self, frames, num_actions):
super(Conv_Q, self).__init__()
self.c1 = nn.Conv2d(frames, 32, kernel_size=8, stride=4)
self.c2 = nn.Conv2d(32, 64, kernel_size=4,... | 5,366 | 28.010811 | 111 | py |
LAP-PAL | LAP-PAL-master/discrete/PAL_DDQN.py | import copy
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
# Used for Atari
class Conv_Q(nn.Module):
def __init__(self, frames, num_actions):
super(Conv_Q, self).__init__()
self.c1 = nn.Conv2d(frames, 32, kernel_size=8, stride=4)
self.c2 = nn.Conv2d(32, 64, kernel_size=4,... | 4,712 | 27.053571 | 111 | py |
LAP-PAL | LAP-PAL-master/discrete/LAP_DDQN.py | import copy
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
# Used for Atari
class Conv_Q(nn.Module):
def __init__(self, frames, num_actions):
super(Conv_Q, self).__init__()
self.c1 = nn.Conv2d(frames, 32, kernel_size=8, stride=4)
self.c2 = nn.Conv2d(32, 64, kernel_size=4,... | 4,658 | 27.408537 | 111 | py |
LAP-PAL | LAP-PAL-master/discrete/DDQN.py | import copy
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
# Used for Atari
class Conv_Q(nn.Module):
def __init__(self, frames, num_actions):
super(Conv_Q, self).__init__()
self.c1 = nn.Conv2d(frames, 32, kernel_size=8, stride=4)
self.c2 = nn.Conv2d(32, 64, kernel_size=4,... | 4,265 | 27.251656 | 111 | py |
sa-nmt | sa-nmt-master/Loss.py | """
This file handles the details of the loss function during training.
This includes: loss criterion, training statistics, and memory optimizations.
"""
from __future__ import division
import time
import sys
import math
import torch
import torch.nn as nn
def nmt_criterion(vocab_size, pad_id=0):
"""
Construc... | 4,092 | 27.227586 | 77 | py |
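`nmt_criterion` builds a per-token loss that must ignore padding positions. A common construction, and a plausible reading of the truncated body, zeroes the pad index in the class-weight vector of an NLL loss; treat this as a hedged sketch rather than the repo's implementation.

```python
import torch
import torch.nn as nn

def nmt_criterion(vocab_size, pad_id=0):
    """Hedged sketch: NLL loss whose pad-token class weight is zeroed."""
    weight = torch.ones(vocab_size)
    weight[pad_id] = 0  # padded positions contribute neither loss nor gradient
    return nn.NLLLoss(weight=weight, reduction="sum")
```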
sa-nmt | sa-nmt-master/translate.py | import argparse
import torch
import modelx as models
import infer
import string
# build args parser
parser = argparse.ArgumentParser(description='Training NMT')
parser.add_argument('-checkpoint', required=True,
help='saved checkpoit.')
parser.add_argument('-input', required=True,
... | 1,416 | 28.520833 | 71 | py |
sa-nmt | sa-nmt-master/extract_tree.py | import argparse
import torch
from torch.autograd import Variable
import modelx as models
import networkx as nx
from networkx.algorithms.tree import maximum_spanning_arborescence
import string
# build args parser
parser = argparse.ArgumentParser(description='Training NMT')
parser.add_argument('-checkpoint', required=T... | 4,276 | 26.242038 | 70 | py |
sa-nmt | sa-nmt-master/models.py | import torch
import torch.nn as nn
from torch.autograd import Variable
from attention import GlobalAttention, SelfAttention
from Utils import aeq
from torch.nn.utils.rnn import pack_padded_sequence as pack
from torch.nn.utils.rnn import pad_packed_sequence as unpack
import math
class EncoderBase(nn.Module):
"""
... | 15,669 | 36.488038 | 79 | py |
sa-nmt | sa-nmt-master/infer.py | import torch
from torch.autograd import Variable
import pickle as pkl
import math
# TODO: documentation of functions
class Beam(object):
r"""Beam search class for NMT.
This is a simple beam search object. It takes model, which can be used to
compute the next probable output and dictionaries that will be u... | 4,938 | 35.316176 | 78 | py |
sa-nmt | sa-nmt-master/attention.py | import torch
import torch.nn as nn
from Utils import aeq
import math
import torch.nn.functional as F
class SelfAttention(nn.Module):
"""Self attention class"""
def __init__(self, dim):
super(SelfAttention, self).__init__()
self.q = nn.Linear(dim, dim, bias=False)
self.k = nn.Linear(dim... | 6,737 | 35.032086 | 78 | py |
sa-nmt | sa-nmt-master/train.py | import argparse
import torch
from Iterator import TextIterator
import models
from itertools import zip_longest
import random
import Loss
import opts
import os
import math
import subprocess
from infer import Beam
import re
from torch.optim.lr_scheduler import ReduceLROnPlateau
parser = argparse.ArgumentParser(descripti... | 8,797 | 36.598291 | 77 | py |
cogcn | cogcn-main/cogcn/utils.py | import pickle as pkl
import os
import networkx as nx
import numpy as np
import scipy.sparse as sp
import torch
import pandas as pd
from sklearn.metrics import roc_auc_score, average_precision_score
from matplotlib import pyplot as plt
def load_data_cma(dataset):
adj_file = os.path.join(dataset, "struct.csv")
f... | 2,240 | 29.69863 | 95 | py |
cogcn | cogcn-main/cogcn/model.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from layers import GraphConvolution
class GCNAE(nn.Module):
def __init__(self, input_feat_dim, hidden_dim1, hidden_dim2, dropout):
super(GCNAE, self).__init__()
self.encgc1 = GraphConvolution(input_feat_dim, hidden_dim1, dropout, a... | 1,415 | 31.930233 | 93 | py |
cogcn | cogcn-main/cogcn/kmeans.py | import sys
import torch
import torch.nn as nn
from sklearn.cluster import KMeans
class Clustering(object):
def __init__(self, K, n_init=5, max_iter=250):
self.K = K
self.n_init = n_init
self.max_iter = max_iter
self.u = None
self.M = None
def cluster(self, embed):
... | 1,646 | 28.410714 | 109 | py |
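The `Clustering` wrapper stores the cluster count and scikit-learn fitting parameters, plus two result slots `u` and `M`. A plausible `cluster` body fits `KMeans` on the embedding and caches labels and centroids; the meaning of `u` (assignments) and `M` (centroids) is an assumption, and `embed` is assumed to be a torch tensor.

```python
import torch
from sklearn.cluster import KMeans

class Clustering:
    def __init__(self, K, n_init=5, max_iter=250):
        self.K, self.n_init, self.max_iter = K, n_init, max_iter
        self.u = None  # assumed: per-point cluster assignments
        self.M = None  # assumed: cluster centroids

    def cluster(self, embed):
        # Assumed body: fit scikit-learn KMeans on the (detached) embedding.
        km = KMeans(n_clusters=self.K, n_init=self.n_init,
                    max_iter=self.max_iter).fit(embed.detach().cpu().numpy())
        self.u = torch.as_tensor(km.labels_)
        self.M = torch.as_tensor(km.cluster_centers_)
        return self.u, self.M
```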
cogcn | cogcn-main/cogcn/layers.py | import torch
import torch.nn.functional as F
from torch.nn.modules.module import Module
from torch.nn.parameter import Parameter
class GraphConvolution(Module):
"""
Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
"""
def __init__(self, in_features, out_features, dropout=0., act=F.relu):... | 1,110 | 30.742857 | 77 | py |
cogcn | cogcn-main/cogcn/train.py | from __future__ import division
from __future__ import print_function
import argparse
import time
import sys
import os
import pickle
import numpy as np
import scipy.sparse as sp
import torch
import torch.nn as nn
from torch import optim
from matplotlib import pyplot as plt
from model import GCNAE
from optimizer impor... | 5,980 | 39.412162 | 157 | py |
cogcn | cogcn-main/cogcn/optimizer.py | import sys
import torch
import torch.nn as nn
import torch.nn.modules.loss
import torch.nn.functional as F
from sklearn.cluster import KMeans
def compute_attribute_loss(lossfn, features, recon, outlier_wt):
loss = lossfn(features, recon)
loss = loss.sum(dim=1)
outlier_wt = torch.log(1/outlier_wt)
at... | 1,768 | 23.915493 | 64 | py |
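The visible lines reduce the reconstruction error to one value per node and convert the outlier score into a weight via log(1/o_i), so nodes flagged as likely outliers contribute less. A plausible completion is the weighted mean below; the final reduction is an assumption.

```python
import torch

def compute_attribute_loss(lossfn, features, recon, outlier_wt):
    loss = lossfn(features, recon)          # element-wise, e.g. MSE with reduction='none'
    loss = loss.sum(dim=1)                  # one reconstruction error per node
    outlier_wt = torch.log(1 / outlier_wt)  # likely outliers get small weights
    return torch.mean(outlier_wt * loss)    # assumed final reduction
```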
deepglo | deepglo-master/DeepGLO/DeepGLO.py | from __future__ import print_function
import torch, h5py
import numpy as np
from scipy.io import loadmat
from torch.nn.utils import weight_norm
import torch.nn as nn
import torch.optim as optim
import numpy as np
# import matplotlib
from torch.autograd import Variable
import sys
import itertools
import torch.nn.func... | 25,258 | 32.235526 | 131 | py |
deepglo | deepglo-master/DeepGLO/data_loader.py | import torch, h5py
import numpy as np
from scipy.io import loadmat
import torch.nn as nn
import torch.optim as optim
import numpy as np
# import matplotlib
from torch.autograd import Variable
import itertools
from sklearn.preprocessing import normalize
import datetime
import json
import os, sys
import pandas as pd
im... | 6,610 | 34.735135 | 167 | py |
deepglo | deepglo-master/DeepGLO/LocalModel.py | import torch, h5py
import numpy as np
from scipy.io import loadmat
from torch.nn.utils import weight_norm
import torch.nn as nn
import torch.optim as optim
import numpy as np
# import matplotlib
from torch.autograd import Variable
import itertools
import torch.nn.functional as F
from DeepGLO.data_loader import *
... | 21,683 | 31.804841 | 157 | py |
deepglo | deepglo-master/run_scripts/run_traffic.py | #### OS and command-line arguments
import sys
import multiprocessing as mp
import gzip
import subprocess
from pathlib import Path
import argparse
import logging
import os
sys.path.append('./')
#### DeepGLO model imports
from DeepGLO.metrics import *
from DeepGLO.DeepGLO import *
from DeepGLO.LocalModel import *
impor... | 3,498 | 24.727941 | 87 | py |
deepglo | deepglo-master/run_scripts/run_wiki.py | #### OS and command-line arguments
import sys
import multiprocessing as mp
import gzip
import subprocess
from pathlib import Path
import argparse
import logging
import os
sys.path.append('./')
#### DeepGLO model imports
from DeepGLO.metrics import *
from DeepGLO.DeepGLO import *
from DeepGLO.LocalModel import *
import ... | 3,475 | 24.940299 | 87 | py |
deepglo | deepglo-master/run_scripts/run_pems.py | #### OS and command-line arguments
import sys
import multiprocessing as mp
import gzip
import subprocess
from pathlib import Path
import argparse
import logging
import os
sys.path.append('./')
#sys.path.append("/efs/users/rajatse/DeepGLOv2/")
#### DeepGLO model imports
from DeepGLO.metrics import *
from DeepGLO.DeepGL... | 3,544 | 25.259259 | 88 | py |
deepglo | deepglo-master/run_scripts/run_electricity.py | #### OS and command-line arguments
import sys
import multiprocessing as mp
import gzip
import subprocess
from pathlib import Path
import argparse
import logging
import os
sys.path.append('./')
#### DeepGLO model imports
from DeepGLO.metrics import *
from DeepGLO.DeepGLO import *
from DeepGLO.LocalModel import *
import... | 3,478 | 25.157895 | 87 | py |
HyperIMBA | HyperIMBA-main/main.py | import argparse
import torch
import dataloader as dl
import torch.nn.functional as F
import numpy as np
from models import GatHyper, SageHyper, GcnHyper
import test as tt
def main(args):
if args.dataset == 'all':
ds_names = ['Cora','Citeseer','Photo','Actor','chameleon','Squirrel']
else:
ds_nam... | 5,254 | 56.119565 | 183 | py |
HyperIMBA | HyperIMBA-main/test.py | import torch
from sklearn.metrics import f1_score
import torch.nn.functional as F
def test(model, data, train_mask, val_mask, test_mask, alpha):
with torch.no_grad():
model.eval()
logits, accs = model(data, alpha), []
for mask in [train_mask,val_mask,test_mask]:
pred = logits[ma... | 617 | 37.625 | 81 | py |
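The evaluation loop above slices the logits by each split mask and compares arg-max predictions with the labels. A hedged standalone version of that masked-accuracy computation (everything after the slice is assumed):

```python
import torch

@torch.no_grad()
def masked_accuracy(logits, labels, mask):
    pred = logits[mask].max(dim=1)[1]  # arg-max class per masked node
    return (pred == labels[mask]).float().mean().item()
```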
HyperIMBA | HyperIMBA-main/dataloader.py | import torch_geometric.datasets as dt
import torch_geometric.transforms as T
import torch
import numpy as np
from dgl.data.utils import generate_mask_tensor, idx2mask
from sklearn.model_selection import train_test_split
def select_dataset(ds,spcial):
if ds=='Cora' or ds=='Citeseer':
ds_loader='Planetoid'
... | 3,365 | 41.607595 | 156 | py |
HyperIMBA | HyperIMBA-main/calculator.py | #Calculate Hyperbolic Embedding
import argparse
import torch
import numpy as np
from models.Poincare import PoincareModel
import dataloader as dl
from torch_geometric.utils import degree, to_networkx
from GraphRicciCurvature.OllivierRicci import OllivierRicci
parser = argparse.ArgumentParser(description='Calculate Hyp... | 2,190 | 41.134615 | 155 | py |
HyperIMBA | HyperIMBA-main/models/GcnHyper.py | from typing import Optional, Tuple
import numpy as np
import torch
from torch import Tensor
from torch.nn import Parameter
from torch_scatter import scatter_add
from torch_sparse import SparseTensor, fill_diag, matmul, mul
from torch_sparse import sum as sparsesum
import torch.nn.functional as F
from torch_geometric.... | 12,196 | 40.06734 | 131 | py |
HyperIMBA | HyperIMBA-main/models/SageHyper.py | import numpy as np
import torch
from torch.nn import Sequential as seq, Parameter,LeakyReLU,init,Linear
from typing import List, Optional, Tuple, Union
import torch.nn.functional as F
from torch import Tensor
from torch.nn import LSTM
from torch_sparse import SparseTensor, matmul
from torch_geometric.nn.aggr import ... | 9,477 | 38.327801 | 131 | py |
HyperIMBA | HyperIMBA-main/models/GatHyper.py | from typing import Optional, Tuple, Union
import torch
import torch.nn.functional as F
from torch import Tensor
from torch.nn import Parameter
from torch_sparse import SparseTensor, set_diag
import math
import numpy as np
from typing import Any
from torch.nn import Sequential as seq, Parameter,LeakyReLU,init,Linear
f... | 12,092 | 39.043046 | 136 | py |
larq | larq-main/larq/optimizers_test.py | import numpy as np
import pytest
import tensorflow as tf
from packaging import version
from tensorflow import keras
from tensorflow.python.keras import testing_utils
import larq as lq
from larq import testing_utils as lq_testing_utils
if version.parse(tf.__version__) >= version.parse("2.11"):
from tensorflow.kera... | 10,528 | 37.01083 | 88 | py |
larq | larq-main/larq/callbacks.py | from typing import Any, Callable, MutableMapping, Optional
from tensorflow import keras
class HyperparameterScheduler(keras.callbacks.Callback):
"""Generic hyperparameter scheduler.
!!! example
```python
bop = lq.optimizers.Bop(threshold=1e-6, gamma=1e-3)
adam = tf.keras.optimizers.A... | 4,375 | 36.724138 | 89 | py |
larq | larq-main/larq/quantizers.py | """A Quantizer defines the way of transforming a full precision input to a
quantized output and the pseudo-gradient method used for the backwards pass.
Quantizers can either be used through quantizer arguments that are supported
for Larq layers, such as `input_quantizer` and `kernel_quantizer`; or they
can be used sim... | 23,775 | 30.449735 | 141 | py |
larq | larq-main/larq/context.py | """Context managers that configure global behaviour of Larq."""
import contextlib
import threading
__all__ = [
"metrics_scope",
"quantized_scope",
"get_training_metrics",
"should_quantize",
]
_quantized_scope = threading.local()
_quantized_scope.should_quantize = False
@contextlib.contextmanager
d... | 2,953 | 29.453608 | 95 | py |
larq | larq-main/larq/conftest.py | import pytest
import tensorflow as tf
from packaging import version
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.eager import context
from larq import context as lq_context
if version.parse(tf.__version__) >= version.parse("1.15"):
strategy_combinations.set_virtual_cpus_to... | 2,508 | 27.511364 | 85 | py |
larq | larq-main/larq/testing_utils.py | import numpy as np
import tensorflow as tf
import larq as lq
def _eval_tensor(tensor):
if tensor is None:
return None
elif callable(tensor):
return _eval_helper(tensor())
else:
return tensor.numpy()
def _eval_helper(tensors):
if tensors is None:
return None
retur... | 7,954 | 34.044053 | 117 | py |
larq | larq-main/larq/quantized_variable.py | """Contains QuantizedVariable, a variable that can be quantized in the forward pass."""
from typing import Optional
import tensorflow as tf
from packaging import version
from tensorflow.python.distribute.values import DistributedVariable
from tensorflow.python.framework import ops
from tensorflow.python.ops import res... | 17,061 | 36.915556 | 148 | py |
larq | larq-main/larq/optimizers.py | """Neural networks with extremely low-precision weights and activations, such as
Binarized Neural Networks (BNNs), usually contain a mix of low-precision weights (e.g.
1-bit) and higher-precision weights (e.g. 8-bit, 16-bit, or 32-bit). Examples of this
include the first and last layers of image classificiation models... | 14,848 | 39.350543 | 201 | py |
larq | larq-main/larq/math_test.py | import numpy as np
import pytest
import tensorflow as tf
import larq as lq
from larq.testing_utils import generate_real_values_with_zeros
@pytest.mark.parametrize("fn", [lq.math.sign])
def test_sign(fn):
x = tf.keras.backend.placeholder(ndim=2)
f = tf.keras.backend.function([x], [fn(x)])
binarized_values... | 1,299 | 31.5 | 80 | py |
larq | larq-main/larq/layers_base.py | import logging
from typing import Optional
import tensorflow as tf
from larq import context, quantizers, utils
from larq.quantized_variable import QuantizedVariable
from larq.quantizers import NoOp, QuantizerType
log = logging.getLogger(__name__)
def _is_binary(quantizer):
return getattr(quantizer, "precision"... | 9,491 | 35.933852 | 94 | py |
larq | larq-main/larq/utils.py | from contextlib import contextmanager
import tensorflow as tf
def memory_as_readable_str(num_bits: int) -> str:
"""Generate a human-readable string for the memory size.
1 KiB = 1024 B; we use the binary prefix (KiB) [1,2] instead of the decimal prefix
(KB) to avoid any confusion with multiplying by 1000... | 1,874 | 25.408451 | 105 | py |
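A quick worked example of the arithmetic the docstring is careful about: with the binary prefix, 8192 bits is exactly 1 KiB, whereas the decimal prefix would report 1.024. The helper below is illustrative, not `memory_as_readable_str` itself.

```python
def bits_to_kib(num_bits: int) -> float:
    """1 KiB = 1024 B = 8192 bits (binary prefix, as the docstring insists)."""
    return num_bits / 8 / 1024

print(bits_to_kib(8192))  # 1.0 with the binary prefix (KiB)
print(8192 / 8 / 1000)    # 1.024 with the decimal prefix (KB): the ambiguity avoided
```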
larq | larq-main/larq/models_test.py | import numpy as np
import pytest
import tensorflow as tf
from packaging import version
import larq as lq
from larq.models import ModelProfile
class ToyModel(tf.keras.Model):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.conv = lq.layers.QuantConv2D(
filters=32,
... | 11,312 | 33.281818 | 88 | py |
larq | larq-main/larq/layers.py | """Each Quantized Layer requires a `input_quantizer` and `kernel_quantizer` that
describes the way of quantizing the activation of the previous layer and the weights
respectively.
If both `input_quantizer` and `kernel_quantizer` are `None` the layer
is equivalent to a full precision layer.
"""
import tensorflow as tf... | 65,598 | 46.535507 | 91 | py |
larq | larq-main/larq/callbacks_test.py | import math
import numpy as np
import pytest
import tensorflow as tf
from packaging import version
from tensorflow.python.keras import testing_utils
import larq as lq
from larq import testing_utils as lq_testing_utils
from larq.callbacks import HyperparameterScheduler
if version.parse(tf.__version__) >= version.pars... | 6,015 | 27.647619 | 87 | py |
larq | larq-main/larq/constraints_test.py | import numpy as np
import pytest
import tensorflow as tf
import larq as lq
from larq.testing_utils import generate_real_values_with_zeros
@pytest.mark.parametrize("name", ["weight_clip"])
def test_serialization(name):
fn = tf.keras.constraints.get(name)
ref_fn = getattr(lq.constraints, name)()
assert fn.... | 811 | 31.48 | 73 | py |
larq | larq-main/larq/layers_test.py | import inspect
import numpy as np
import pytest
import tensorflow as tf
from packaging import version
import larq as lq
from larq import testing_utils
PARAMS_ALL_LAYERS = [
(lq.layers.QuantDense, tf.keras.layers.Dense, (3, 2), dict(units=3)),
(
lq.layers.QuantConv1D,
tf.keras.layers.Conv1D,
... | 12,025 | 34.68546 | 91 | py |
larq | larq-main/larq/constraints.py | """Functions from the `constraints` module allow setting constraints
(eg. weight clipping) on network parameters during optimization.
The penalties are applied on a per-layer basis. The exact API will depend on the layer,
but the layers `QuantDense`, `QuantConv1D`, `QuantConv2D` and `QuantConv3D` have a
unified API.
... | 1,392 | 25.283019 | 87 | py |
larq | larq-main/larq/models.py | import itertools
from dataclasses import dataclass
from typing import Any, Callable, Iterator, Mapping, Optional, Sequence, TypeVar, Union
import numpy as np
import tensorflow as tf
from terminaltables import AsciiTable
from larq import layers as lq_layers
from larq.utils import memory_as_readable_str
__all__ = ["su... | 16,824 | 31.861328 | 105 | py |
larq | larq-main/larq/metrics.py | """We add metrics specific to extremely quantized networks using a
`larq.context.metrics_scope` rather than through the `metrics` parameter of
`model.compile()`, where most common metrics reside. This is because, to calculate
metrics like the `flip_ratio`, we need a layer's kernel or activation and not just the
`y_true... | 3,341 | 34.935484 | 86 | py |
larq | larq-main/larq/activations.py | """Activations can either be used through an `Activation` layer, or through the
`activation` argument supported by all forward layers:
```python
import tensorflow as tf
import larq as lq
model.add(lq.layers.QuantDense(64))
model.add(tf.keras.layers.Activation('hard_tanh'))
```
This is equivalent to:
```python
model... | 1,481 | 21.119403 | 79 | py |
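The docstring's equivalence example is truncated; in standard Keras style the activation can also be passed directly to the layer, which is very likely what the elided line shows. A hedged reconstruction, not the docstring's literal text (the input shape is an arbitrary placeholder):

```python
import tensorflow as tf
import larq as lq

# Assumed reconstruction of the elided "equivalent" form: the activation
# name is passed straight to the quantized layer.
model = tf.keras.Sequential()
model.add(lq.layers.QuantDense(64, activation="hard_tanh", input_shape=(32,)))
```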
larq | larq-main/larq/quantizers_test.py | import functools
import numpy as np
import pytest
import tensorflow as tf
from packaging import version
import larq as lq
from larq import testing_utils
class DummyTrainableQuantizer(tf.keras.layers.Layer):
"""Used to test whether we can set layers as quantizers without any throws."""
_custom_metrics = Non... | 17,283 | 37.238938 | 96 | py |
larq | larq-main/larq/activations_test.py | import numpy as np
import pytest
import tensorflow as tf
import larq as lq
from larq.testing_utils import generate_real_values_with_zeros
@pytest.mark.parametrize("name", ["hard_tanh", "leaky_tanh"])
def test_serialization(name):
fn = tf.keras.activations.get(name)
ref_fn = getattr(lq.activations, name)
... | 1,259 | 29 | 74 | py |
larq | larq-main/larq/quantized_variable_test.py | import numpy as np
import pytest
import tensorflow as tf
from numpy.testing import assert_almost_equal, assert_array_equal
from packaging import version
from tensorflow.python.distribute.values import DistributedVariable
from larq import context, testing_utils
from larq.quantized_variable import QuantizedVariable
from... | 14,405 | 37.31383 | 94 | py |
DAC2018 | DAC2018-master/setup.py | from distutils.core import setup, Extension
module = Extension('mypack',extra_compile_args=['-std=c++11'], include_dirs=['/usr/local/cuda/include'],
sources = ['Detector.cpp'],extra_objects = ['./plugin.o', './kernel.o'], extra_link_args=['-lnvinfer', '-lnvcaffe_parser', '-lcudnn'])
setup(name = 'mypack', vers... | 388 | 54.571429 | 142 | py |
MAMS-for-ABSA | MAMS-for-ABSA-master/src/aspect_category_model/capsnet.py | import torch
from torch import nn
import torch.nn.functional as F
from torch.nn import init
from src.module.utils.constants import PAD_INDEX, INF
from src.module.utils.sentence_clip import sentence_clip
from src.module.attention.dot_attention import DotAttention
from src.module.attention.scaled_dot_attention import Sca... | 4,384 | 45.648936 | 119 | py |
MAMS-for-ABSA | MAMS-for-ABSA-master/src/aspect_category_model/recurrent_capsnet.py | import torch
from torch import nn
import torch.nn.functional as F
from src.aspect_category_model.capsnet import CapsuleNetwork
class RecurrentCapsuleNetwork(CapsuleNetwork):
def __init__(self, embedding, aspect_embedding, num_layers, bidirectional, capsule_size, dropout, num_categories):
super(RecurrentCa... | 1,597 | 41.052632 | 118 | py |
MAMS-for-ABSA | MAMS-for-ABSA-master/src/aspect_category_model/bert_capsnet.py | import torch
from torch import nn
import torch.nn.functional as F
from torch.nn import init
from src.module.utils.constants import PAD_INDEX, INF
from src.module.utils.sentence_clip import sentence_clip
from src.module.attention.dot_attention import DotAttention
from src.module.attention.scaled_dot_attention import Sca... | 4,772 | 48.206186 | 119 | py |
MAMS-for-ABSA | MAMS-for-ABSA-master/src/aspect_term_model/capsnet.py | import torch
from torch import nn
import torch.nn.functional as F
from torch.nn import init
from src.module.utils.constants import PAD_INDEX, INF
from src.module.utils.sentence_clip import sentence_clip
from src.module.attention.dot_attention import DotAttention
from src.module.attention.scaled_dot_attention import Sca... | 4,714 | 46.15 | 119 | py |
MAMS-for-ABSA | MAMS-for-ABSA-master/src/aspect_term_model/recurrent_capsnet.py | import torch
from torch import nn
import torch.nn.functional as F
from src.aspect_term_model.capsnet import CapsuleNetwork
class RecurrentCapsuleNetwork(CapsuleNetwork):
def __init__(self, embedding, num_layers, bidirectional, capsule_size, dropout, num_categories):
super(RecurrentCapsuleNetwork, self).__... | 1,528 | 40.324324 | 100 | py |
MAMS-for-ABSA | MAMS-for-ABSA-master/src/aspect_term_model/bert_capsnet.py | import torch
from torch import nn
import torch.nn.functional as F
from torch.nn import init
from src.module.utils.constants import PAD_INDEX, INF
from src.module.utils.sentence_clip import sentence_clip
from src.module.attention.dot_attention import DotAttention
from src.module.attention.scaled_dot_attention import Sca... | 4,780 | 47.785714 | 119 | py |
MAMS-for-ABSA | MAMS-for-ABSA-master/src/module/attention/concat_attention.py | import torch
from torch import nn
from torch.nn import init
from src.module.attention.attention import Attention
class ConcatAttention(Attention):
def __init__(self, query_size, key_size, dropout=0):
super(ConcatAttention, self).__init__(dropout)
self.query_weights = nn.Parameter(torch.Tensor(quer... | 1,007 | 41 | 101 | py |
MAMS-for-ABSA | MAMS-for-ABSA-master/src/module/attention/bilinear_attention.py | import torch
from torch import nn
from torch.nn import init
from src.module.attention.attention import Attention
class BilinearAttention(Attention):
def __init__(self, query_size, key_size, dropout=0):
super(BilinearAttention, self).__init__(dropout)
self.weights = nn.Parameter(torch.FloatTensor(q... | 659 | 33.736842 | 76 | py |
MAMS-for-ABSA | MAMS-for-ABSA-master/src/module/attention/tanh_bilinear_attention.py | import torch
from torch import nn
from torch.nn import init
from src.module.attention.attention import Attention
class TanhBilinearAttention(Attention):
def __init__(self, query_size, key_size, dropout=0):
super(TanhBilinearAttention, self).__init__(dropout)
self.weights = nn.Parameter(torch.Float... | 740 | 36.05 | 94 | py |
MAMS-for-ABSA | MAMS-for-ABSA-master/src/module/attention/tanh_concat_attention.py | import torch
from torch import nn
from torch.nn import init
from src.module.attention.attention import Attention
class TanhConcatAttention(Attention):
def __init__(self, query_size, key_size, dropout=0):
super(TanhConcatAttention, self).__init__(dropout)
self.query_weights = nn.Parameter(torch.Ten... | 1,049 | 41 | 101 | py |
MAMS-for-ABSA | MAMS-for-ABSA-master/src/module/attention/multi_head_attention.py | from torch import nn
from torch.nn import init
import math
class MultiHeadAttention(nn.Module):
def __init__(self, attention, num_heads, hidden_size, key_size='default', value_size='default', out_size='default'):
key_size = hidden_size // num_heads if key_size == 'default' else key_size
value_size... | 3,451 | 55.590164 | 120 | py |
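The multi-head constructor above defaults per-head key and value sizes to `hidden_size // num_heads`; the reshape that realizes this split in the forward pass is, generically (a sketch, not the repo's exact method):

```python
import torch

def split_heads(x, num_heads):
    # (batch, seq, hidden) -> (batch, num_heads, seq, hidden // num_heads)
    batch, seq, hidden = x.size()
    return x.view(batch, seq, num_heads, hidden // num_heads).transpose(1, 2)

x = torch.randn(2, 5, 32)
print(split_heads(x, num_heads=4).shape)  # torch.Size([2, 4, 5, 8])
```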
MAMS-for-ABSA | MAMS-for-ABSA-master/src/module/attention/attention.py | from torch import nn
import torch.nn.functional as F
from src.module.utils import constants
class Attention(nn.Module):
"""
The base class of attention.
"""
def __init__(self, dropout):
super(Attention, self).__init__()
self.dropout = dropout
def forward(self, query, key, value, m... | 2,355 | 37 | 104 | py |
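The `Attention` subclasses in this repo differ mainly in how they score query-key pairs; the scaled-dot variant imported throughout computes softmax(QK^T / sqrt(d_k)) V with masked positions forced to -inf. A generic standalone sketch, not the repo's class:

```python
import math
import torch
import torch.nn.functional as F

def scaled_dot_attention(query, key, value, mask=None):
    # scores = QK^T / sqrt(d_k); masked positions go to -inf before softmax.
    scores = query.matmul(key.transpose(-2, -1)) / math.sqrt(query.size(-1))
    if mask is not None:
        scores = scores.masked_fill(mask == 0, float("-inf"))
    return F.softmax(scores, dim=-1).matmul(value)

q = k = v = torch.randn(2, 5, 16)
print(scaled_dot_attention(q, k, v).shape)  # torch.Size([2, 5, 16])
```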