Columns: repo (string, lengths 1–99) | file (string, lengths 13–215) | code (string, lengths 12–59.2M) | file_length (int64, 12–59.2M) | avg_line_length (float64, 3.82–1.48M) | max_line_length (int64, 12–2.51M) | extension_type (string, 1 class)

| repo | file | code | file_length | avg_line_length | max_line_length | extension_type |
|---|---|---|---|---|---|---|
hessian-eff-dim | hessian-eff-dim-master/experiments/gen-bounds/compute_path_norm.py | import math
import torch
import torchvision
import hess
from hess.nets import ConvNetDepth
import torchvision
from torchvision import transforms
from norms import lp_path_norm
def main():
## load in a loader just for sizing ##
transform = transforms.Compose(
[
transforms.Resize(32),
... | 1,985 | 30.52381 | 85 | py |
hessian-eff-dim | hessian-eff-dim-master/experiments/gen-bounds/file_mover.py | import torch
import os
import shutil
def main():
## model sizes ##
depths = torch.arange(9)
widths = torch.arange(4, 65, 4)
for d_ind, dpth in enumerate(depths):
for w_ind, wdth in enumerate(widths):
depth = dpth.item()
width = wdth.item()
print("depth ", d... | 873 | 24.705882 | 86 | py |
hessian-eff-dim | hessian-eff-dim-master/experiments/track-subnet-hessian/track_hessians_subnets.py | import math
import torch
import numpy as np
import pickle
from torch import nn
import hess
import hess.net_utils as net_utils
import hess.utils as utils
from hess.nets import MaskedNetLinear, SubNetLinear
# from hess.nets import MaskedLayerLinear, SubLayerLinear
#torch.set_default_tensor_type(torch.cuda.FloatTensor)
... | 4,480 | 30.118056 | 83 | py |
hessian-eff-dim | hessian-eff-dim-master/experiments/cifar-loss-surfaces/min_max_evals.py | """
compute hessian vector products as well as eigenvalues of the hessian
# copied from https://github.com/tomgoldstein/loss-landscape/blob/master/hess_vec_prod.py
# code re-written to use gpu by default and then to use gpytorch
"""
import torch
import time
import numpy as np
from torch import nn
from torc... | 3,673 | 35.019608 | 108 | py |
hessian-eff-dim | hessian-eff-dim-master/experiments/cifar-loss-surfaces/loss_surface_runner.py | import math
import torch
import hess
import hess.utils as utils
import hess.nets
import numpy as np
import pickle
import argparse
import os, sys
import time
from hess import data
import hess.nets as models
from parser import parser
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
i... | 3,603 | 31.468468 | 91 | py |
hessian-eff-dim | hessian-eff-dim-master/experiments/cifar-loss-surfaces/compute_loss_surface.py | import math
import torch
import numpy as np
import hess.utils as utils
def loss_getter(model, dataloader, criterion, use_cuda=False):
train_loss = 0.
for dd, data in enumerate(dataloader):
# get the inputs; data is a list of [inputs, labels]
inputs, labels = data
if use_cuda:
... | 2,401 | 28.654321 | 76 | py |
hessian-eff-dim | hessian-eff-dim-master/experiments/cifar-loss-surfaces/loss_surface_evec_trainer.py | import math
import torch
import hess
import hess.utils as utils
import hess.nets
import numpy as np
import pickle
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
from compute_loss_surface import get_loss_surface
from hess.utils import g... | 3,964 | 30.72 | 82 | py |
hessian-eff-dim | hessian-eff-dim-master/experiments/weightless-nns/networks.py | import torch
from torch import nn
import hess
import math
import torch.autograd as autograd
from hess.nets import SubLayerLinear
import torch.nn.functional as F
class GetSubnet(autograd.Function):
@staticmethod
def forward(ctx, scores, k):
# Get the subnetwork by sorting the scores and using the top k%... | 4,786 | 29.685897 | 72 | py |
hessian-eff-dim | hessian-eff-dim-master/experiments/blr/blr_loss.py | import math
import torch
from torch.distributions import MultivariateNormal
from torch.autograd import Variable
def features(x, p=10):
phi = torch.zeros(x.numel(), p)
ind = 0
for freq in range(p//2):
phi[:, ind] = torch.cos((freq+1)*math.pi*x)
ind += 1
phi[:, ind] = torch.sin((freq+... | 2,997 | 36.012346 | 96 | py |
hessian-eff-dim | hessian-eff-dim-master/experiments/cifar-homogeneity/min_max_evals.py | """
compute hessian vector products as well as eigenvalues of the hessian
# copied from https://github.com/tomgoldstein/loss-landscape/blob/master/hess_vec_prod.py
# code re-written to use gpu by default and then to use gpytorch
"""
import torch
import time
import numpy as np
from torch import nn
from torc... | 3,673 | 35.019608 | 108 | py |
hessian-eff-dim | hessian-eff-dim-master/experiments/cifar-homogeneity/hessian_evals.py | import torch
import time
import numpy as np
import hess
from torch import nn
import torch.nn.functional as F
from torch.autograd import Variable
from gpytorch.utils.lanczos import lanczos_tridiag, lanczos_tridiag_to_diag
from hess.utils import unflatten_like, gradtensor_to_tensor, eval_hess_vec_prod
def get_hessian_e... | 1,680 | 33.306122 | 81 | py |
hessian-eff-dim | hessian-eff-dim-master/experiments/cifar-homogeneity/perturbations.py | import math
import torch
import numpy as np
import matplotlib.pyplot as plt
import hess
import hess.utils as utils
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
... | 6,949 | 32.095238 | 90 | py |
hessian-eff-dim | hessian-eff-dim-master/experiments/cifar-homogeneity/compute_loss_surface.py | import math
import torch
import numpy as np
import hess.utils as utils
def loss_getter(model, dataloader, criterion, use_cuda=False):
train_loss = 0.
for dd, data in enumerate(dataloader):
# get the inputs; data is a list of [inputs, labels]
inputs, labels = data
if use_cuda:
... | 2,401 | 28.654321 | 76 | py |
hessian-eff-dim | hessian-eff-dim-master/experiments/cifar-homogeneity/loss_surface_evec_trainer.py | import math
import torch
import hess
import hess.utils as utils
import hess.nets
import numpy as np
import pickle
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
from compute_loss_surface import get_loss_surface
from min_max_evals impor... | 4,398 | 31.109489 | 82 | py |
hessian-eff-dim | hessian-eff-dim-master/experiments/cifar-homogeneity/train_cifar10.py | import math
import torch
import hess
import hess.utils as utils
import hess.nets
import numpy as np
import pickle
import argparse
import os, sys
import time
from hess import data
import hess.nets as models
from parser import parser
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
i... | 3,335 | 28.263158 | 80 | py |
hessian-eff-dim | hessian-eff-dim-master/experiments/cifar-homogeneity/get_evals.py |
import math
import torch
import hess
import hess.utils as utils
import hess.nets
import numpy as np
import pickle
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
from hess.utils import get_hessian_eigs
import matplotlib.pyplot as plt
... | 2,601 | 26.680851 | 82 | py |
hessian-eff-dim | hessian-eff-dim-master/experiments/eigenvalues/fisher_vec_prod.py | """
compute hessian vector products as well as eigenvalues of the hessian
# copied from https://github.com/tomgoldstein/loss-landscape/blob/master/hess_vec_prod.py
# code re-written to use gpu by default and then to use gpytorch
"""
import torch
import time
# import numpy as np
# from torch import nn
# fr... | 6,420 | 33.154255 | 112 | py |
hessian-eff-dim | hessian-eff-dim-master/experiments/eigenvalues/hess_vec_prod.py | """
compute hessian vector products as well as eigenvalues of the hessian
# copied from https://github.com/tomgoldstein/loss-landscape/blob/master/hess_vec_prod.py
# code re-written to use gpu by default and then to use gpytorch
"""
import torch
import time
import numpy as np
from torch import nn
from torc... | 6,172 | 36.186747 | 110 | py |
hessian-eff-dim | hessian-eff-dim-master/experiments/eigenvalues/train_width.py | import math
import torch
import hess
import hess.utils as utils
import hess.nets
import numpy as np
import pickle
import argparse
import os, sys
import time
import tabulate
import swag.utils as training_utils
import swag
from hess import data
import hess.nets as models
from parser import parser
columns = ["ep", "lr",... | 5,954 | 32.455056 | 105 | py |
hessian-eff-dim | hessian-eff-dim-master/experiments/eigenvalues/run_hess_eigs.py | """
script to compute maximum and minimum eigenvalues of the hessian
"""
import argparse
import torch
# import torch.nn.functional as F
import numpy as np
# import os
# import tqdm
#from hess import models, data
from hess import data
import hess.nets as models
from hess_vec_prod import min_max_hessian_eigs
from... | 4,395 | 26.475 | 89 | py |
hessian-eff-dim | hessian-eff-dim-master/experiments/eigenvalues/fvp.py | # Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in th... | 5,883 | 36.96129 | 125 | py |
hessian-eff-dim | hessian-eff-dim-master/experiments/eigenvalues/train_depth.py | import math
import torch
import hess
import hess.utils as utils
import hess.nets
import numpy as np
import pickle
import argparse
import os, sys
import time
import tabulate
import swag.utils as training_utils
import swag
from hess import data
import hess.nets as models
from parser import parser
columns = ["ep", "lr",... | 5,964 | 32.511236 | 105 | py |
hessian-eff-dim | hessian-eff-dim-master/experiments/bnn_spirals/bnn_spirals_pyro.py | import pyro
import torch
import numpy as np
import matplotlib.pyplot as plt
import pyro.distributions as dist
torch.set_default_tensor_type(torch.cuda.FloatTensor)
def twospirals(n_points, noise=.5, random_state=920):
"""
Returns the two spirals dataset.
"""
n = np.sqrt(np.random.rand(n_points,1)) *... | 2,970 | 40.263889 | 99 | py |
hessian-eff-dim | hessian-eff-dim-master/experiments/bnn_spirals/bnn_pyro.py | import pyro
import torch
import numpy as np
import pyro.distributions as dist
torch.set_default_tensor_type(torch.cuda.FloatTensor)
def get_data(N=50, D_X=3, sigma_obs=0.05, N_test=500):
D_Y = 1 # create 1d outputs
np.random.seed(0)
X = np.linspace(-1, 1, N)
X = np.power(X[:, np.newaxis], np.arange(... | 2,470 | 34.811594 | 99 | py |
hessian-eff-dim | hessian-eff-dim-master/experiments/bnn_spirals/bnn_spirals.py | # Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
"""
Bayesian Neural Network
=======================
We demonstrate how to use NUTS to do inference on a simple (small)
Bayesian neural network with two hidden layers.
"""
import argparse
import os
import time
import matplotlib
impor... | 5,733 | 35.993548 | 106 | py |
hessian-eff-dim | hessian-eff-dim-master/experiments/bnn_spirals/bnn.py | # Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
"""
Bayesian Neural Network
=======================
We demonstrate how to use NUTS to do inference on a simple (small)
Bayesian neural network with two hidden layers.
"""
import argparse
import os
import time
import matplotlib
impor... | 5,605 | 32.771084 | 106 | py |
hessian-eff-dim | hessian-eff-dim-master/experiments/eff-dim-width/eff_dim_spirals.py | import math
import torch
import hess
import matplotlib.pyplot as plt
from hess.nets import Transformer
import hess.loss_surfaces as loss_surfaces
import numpy as np
import sklearn.datasets as datasets
import hess.utils as utils
import pickle
def twospirals(n_points, noise=.5, random_state=920):
"""
Returns th... | 3,540 | 30.336283 | 87 | py |
hgr_v2t | hgr_v2t-master/t2vretrieval/driver/multilevel_match.py | import os
import sys
import argparse
import json
import time
import t2vretrieval.models.mlmatch
import t2vretrieval.readers.rolegraphs
import torch.utils.data.dataloader as dataloader
import framework.run_utils
import framework.logbase
def main():
parser = argparse.ArgumentParser()
parser.add_argument('model_cf... | 4,900 | 40.888889 | 98 | py |
hgr_v2t | hgr_v2t-master/t2vretrieval/driver/global_match.py | import os
import sys
import argparse
import json
import time
import t2vretrieval.models.globalmatch
import t2vretrieval.readers.mpdata
import torch.utils.data.dataloader as dataloader
import framework.run_utils
import framework.logbase
def main():
parser = argparse.ArgumentParser()
parser.add_argument('model_cfg... | 4,326 | 41.421569 | 98 | py |
hgr_v2t | hgr_v2t-master/t2vretrieval/models/criterion.py | import torch
import torch.nn as nn
import framework.configbase
import framework.ops
def cosine_sim(im, s):
'''cosine similarity between all the image and sentence pairs
'''
inner_prod = im.mm(s.t())
im_norm = torch.sqrt((im**2).sum(1).view(-1, 1) + 1e-18)
s_norm = torch.sqrt((s**2).sum(1).view(1, -1) + 1e-1... | 2,756 | 32.216867 | 86 | py |
hgr_v2t | hgr_v2t-master/t2vretrieval/models/mlmatch.py | import numpy as np
import torch
import framework.ops
import t2vretrieval.encoders.mlsent
import t2vretrieval.encoders.mlvideo
import t2vretrieval.models.globalmatch
from t2vretrieval.models.criterion import cosine_sim
from t2vretrieval.models.globalmatch import VISENC, TXTENC
class RoleGraphMatchModelConfig(t2vret... | 9,203 | 40.459459 | 108 | py |
hgr_v2t | hgr_v2t-master/t2vretrieval/models/globalmatch.py | import os
import numpy as np
import collections
import json
import torch
import framework.ops
import framework.configbase
import framework.modelbase
import t2vretrieval.encoders.video
import t2vretrieval.encoders.sentence
import t2vretrieval.models.criterion
import t2vretrieval.models.evaluation
from t2vretrieval.mo... | 5,856 | 32.278409 | 102 | py |
hgr_v2t | hgr_v2t-master/t2vretrieval/readers/mpdata.py | import os
import json
import numpy as np
import torch.utils.data
BOS, EOS, UNK = 0, 1, 2
class MPDataset(torch.utils.data.Dataset):
def __init__(self, name_file, mp_ft_files, word2int_file, max_words_in_sent,
ref_caption_file=None, is_train=False, _logger=None):
if _logger is None:
self.print_fn = pr... | 3,136 | 28.87619 | 79 | py |
hgr_v2t | hgr_v2t-master/t2vretrieval/readers/rolegraphs.py | import os
import json
import numpy as np
import h5py
import collections
import torch
import t2vretrieval.readers.mpdata
ROLES = ['V', 'ARG1', 'ARG0', 'ARG2', 'ARG3', 'ARG4',
'ARGM-LOC', 'ARGM-MNR', 'ARGM-TMP', 'ARGM-DIR', 'ARGM-ADV',
'ARGM-PRP', 'ARGM-PRD', 'ARGM-COM', 'ARGM-MOD', 'NOUN']
class RoleGraphDataset(t... | 7,483 | 33.809302 | 102 | py |
hgr_v2t | hgr_v2t-master/t2vretrieval/encoders/mlsent.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import framework.configbase
import framework.ops
import t2vretrieval.encoders.graph
import t2vretrieval.encoders.sentence
class RoleGraphEncoderConfig(t2vretrieval.encoders.sentence.SentEncoderConfig):
def __init__(self):
super().__init__()
... | 3,169 | 35.860465 | 111 | py |
hgr_v2t | hgr_v2t-master/t2vretrieval/encoders/video.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import framework.configbase
class MPEncoderConfig(framework.configbase.ModuleConfig):
def __init__(self):
super().__init__()
self.dim_fts = [2048]
self.dim_embed = 1024
self.dropout = 0
class MPEncoder(nn.Module):
def __init__(sel... | 836 | 23.617647 | 75 | py |
hgr_v2t | hgr_v2t-master/t2vretrieval/encoders/graph.py | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class GCNLayer(nn.Module):
def __init__(self, embed_size, dropout=0.0):
super().__init__()
self.embed_size = embed_size
self.ctx_layer = nn.Linear(self.embed_size, self.embed_size, bias=False)
self.layernorm = nn.LayerNor... | 2,945 | 31.373626 | 76 | py |
hgr_v2t | hgr_v2t-master/t2vretrieval/encoders/mlvideo.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import framework.configbase
class MultilevelEncoderConfig(framework.configbase.ModuleConfig):
def __init__(self):
super().__init__()
self.dim_fts = [2048]
self.dim_embed = 1024
self.dropout = 0
self.num_levels = 3
self.share... | 1,657 | 29.145455 | 88 | py |
hgr_v2t | hgr_v2t-master/t2vretrieval/encoders/sentence.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import framework.configbase
from framework.modules.embeddings import Embedding
import framework.ops
class SentEncoderConfig(framework.configbase.ModuleConfig):
def __init__(self):
super().__init__()
self.num_words = 0
self.dim_word = 300... | 3,915 | 34.6 | 99 | py |
hgr_v2t | hgr_v2t-master/framework/modelbase.py | import os
import time
import json
import numpy as np
import torch
import torch.nn as nn
from torch import optim
import torch.nn.functional as F
import framework.logbase
class ModelBase(object):
def __init__(self, config, _logger=None, gpu_id=0):
'''initialize model
(support single GPU, otherwise need to ... | 7,771 | 35.660377 | 110 | py |
hgr_v2t | hgr_v2t-master/framework/ops.py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pack_padded_sequence
from torch.nn.utils.rnn import pad_packed_sequence
def l2norm(inputs, dim=-1):
# inputs: (batch, dim_ft)
norm = torch.norm(inputs, p=2, dim=dim, keepdim=True)
inputs = inpu... | 2,765 | 31.541176 | 81 | py |
hgr_v2t | hgr_v2t-master/framework/modules/embeddings.py | """ Embeddings module """
import math
import torch
import torch.nn as nn
class PositionalEncoding(nn.Module):
"""
Implements the sinusoidal positional encoding for
non-recurrent neural networks.
Implementation based on "Attention Is All You Need"
Args:
dim_embed (int): embedding size (even number)
... | 2,472 | 29.158537 | 76 | py |
hgr_v2t | hgr_v2t-master/framework/modules/global_attention.py | """ Global attention modules (Luong / Bahdanau) """
import torch
import torch.nn as nn
import torch.nn.functional as F
class GlobalAttention(nn.Module):
'''
Global attention takes a matrix and a query vector. It
then computes a parameterized convex combination of the matrix
based on the input query.
Constru... | 4,651 | 38.423729 | 95 | py |
FRB | FRB-main/docs/conf.py | # -*- coding: utf-8 -*-
#
# frb documentation build configuration file, created by
# sphinx-quickstart on Fri Nov 13 13:39:35 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All c... | 9,974 | 30.767516 | 80 | py |
kaldi | kaldi-master/egs/wsj/s5/steps/overlap/detect_overlaps_pyannote.py | #!/usr/local/env python
# Copyright 2021 Johns Hopkins University (Author: Desh Raj)
#
# Overlap detection using pretrained Pyannote models
import torch
import argparse
import glob
import os
import pathlib
def read_args():
parser = argparse.ArgumentParser()
parser.add_argument("wav_scp", help="Path to wav.sc... | 1,215 | 29.4 | 92 | py |
kaldi | kaldi-master/egs/wsj/s5/steps/tfrnnlm/lstm_fast.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
# Copyright (C) 2017 Intellisist, Inc. (Author: Hainan Xu)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apa... | 5,459 | 32.496933 | 112 | py |
kaldi | kaldi-master/egs/wsj/s5/steps/tfrnnlm/lstm.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
# Copyright (C) 2017 Intellisist, Inc. (Author: Hainan Xu)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apa... | 8,419 | 33.937759 | 112 | py |
kaldi | kaldi-master/egs/wsj/s5/steps/pytorchnn/check_py.py | import numpy as np
import torch
| 32 | 10 | 18 | py |
kaldi | kaldi-master/egs/wsj/s5/steps/pytorchnn/model.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import torch
import torch.nn as nn
class RNNModel(nn.Module):
"""Container module with an encoder, a recurrent module, and a decoder."""
def __init__(self, rnn_type, ntoken, ninp, nhid, nl... | 6,346 | 39.170886 | 98 | py |
kaldi | kaldi-master/egs/wsj/s5/steps/pytorchnn/data.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import torch
class Dictionary(object):
def __init__(self, oov):
self.word2idx = {}
self.idx2word = []
self.oov = oov
def read_vocab(self, path):
with open(pa... | 1,749 | 31.407407 | 81 | py |
kaldi | kaldi-master/egs/wsj/s5/steps/pytorchnn/compute_sentence_scores.py | # Copyright 2020 Ke Li
""" This script computes sentence scores in a batch computation mode with a
PyTorch-trained neural LM.
It is called by steps/pytorchnn/lmrescore_{nbest, lattice}_pytorchnn.sh
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_funct... | 12,645 | 40.462295 | 110 | py |
kaldi | kaldi-master/egs/wsj/s5/steps/pytorchnn/train.py | """ This script is modified based on the word language model example in PyTorch:
https://github.com/pytorch/examples/tree/master/word_language_model
An example of model training and N-best rescoring can be found here:
egs/swbd/s5c/local/pytorchnn/run_nnlm.sh
"""
from __future__ import absolute_import
from ... | 9,616 | 35.706107 | 83 | py |
kaldi | kaldi-master/egs/wsj/s5/steps/pytorchnn/estimate_arc_nnlm_scores.py | # Copyright 2021 Ke Li
""" This script estimates neural LM scores for each arc on lattices.
It is called by steps/pytorchnn/lmrescore_lattice_pytorchnn.sh
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import argparse
from collections... | 5,644 | 43.801587 | 110 | py |
kaldi | kaldi-master/egs/wsj/s5/steps/libs/nnet3/xconfig/convolution.py | # Copyright 2018 Johns Hopkins University (Author: Dan Povey)
# 2016 Vijayaditya Peddinti
# Apache 2.0.
""" This module has the implementation of convolutional layers.
"""
from __future__ import print_function
from __future__ import division
import math
import re
import sys
from libs.nnet3.xconfig.ba... | 61,163 | 49.800664 | 107 | py |
kaldi | kaldi-master/egs/librispeech/s5/fairseq_ltlm/ltlm/eval.py | # Copyright 2021 STC-Innovation LTD (Author: Anton Mitrofanov)
import torch
import torch.utils.data
import logging
from tqdm import tqdm
import argparse
import numpy as np
from datetime import datetime
from fairseq import checkpoint_utils, data, options, tasks
from ltlm.pyutils.logging_utils import setup_logger
from l... | 6,857 | 42.681529 | 116 | py |
kaldi | kaldi-master/egs/librispeech/s5/fairseq_ltlm/ltlm/modules/transformer_sentence_encoder.py | from typing import Optional, Tuple
import torch
import torch.nn as nn
import torch.utils.checkpoint
from fairseq.modules import (
FairseqDropout,
LayerDropModuleList,
LayerNorm,
MultiheadAttention,
TransformerSentenceEncoderLayer,
)
from fairseq.modules.quant_noise import quant_noise as apply_quant... | 10,315 | 35.452297 | 117 | py |
kaldi | kaldi-master/egs/librispeech/s5/fairseq_ltlm/ltlm/modules/lattice_positional_embedding.py | from typing import Dict, Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from torch import Tensor
class LatticePositionalEmbedding(nn.Module):
def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int, tied_embs=False):
super().__i... | 1,632 | 33.020833 | 99 | py |
kaldi | kaldi-master/egs/librispeech/s5/fairseq_ltlm/ltlm/pyutils/lattice_utils.py | # Copyright 2021 STC-Innovation LTD (Author: Anton Mitrofanov)
import numpy as np
import torch
from queue import PriorityQueue
import time
import logging
import random
from collections import defaultdict
logger = logging.getLogger(__name__)
# my lats type = [(word_id, state_from, state_to), ...]
WORD_ID = 0
STATE_FRO... | 15,580 | 37.758706 | 112 | py |
kaldi | kaldi-master/egs/librispeech/s5/fairseq_ltlm/ltlm/pyscripts/draw_rescore_lats.py | # Copyright 2021 STC-Innovation LTD (Author: Anton Mitrofanov)
import argparse
import graphviz
import logging
import torch
import torch.utils.data
import numpy as np
from tqdm import tqdm
import os
from fairseq import checkpoint_utils, data, options, tasks
import ltlm.eval
from ltlm.datasets import LatsOracleAlignData... | 4,047 | 41.610526 | 122 | py |
kaldi | kaldi-master/egs/librispeech/s5/fairseq_ltlm/ltlm/criterions/bce_loss.py | # Copyright 2021 STC-Innovation LTD (Author: Anton Mitrofanov)
import torch.nn
import math
from typing import Any, Dict, List
import logging
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq import metrics, utils
logger = logging.getLogger(__name__)
@register_criterion('bce_loss')
class... | 2,340 | 38.016667 | 94 | py |
kaldi | kaldi-master/egs/librispeech/s5/fairseq_ltlm/ltlm/models/LTLM.py | # Copyright 2021 STC-Innovation LTD (Author: Anton Mitrofanov)
from ltlm.modules.transformer_sentence_encoder import LatticeTransformerSentenceEncoder
import torch
import torch.nn as nn
from fairseq.models import (
BaseFairseqModel,
FairseqEncoderModel,
register_model,
register_model_architecture,
)
im... | 10,091 | 46.158879 | 148 | py |
kaldi | kaldi-master/egs/librispeech/s5/fairseq_ltlm/ltlm/datasets/LatsOracleAlignDataSet.py | # Copyright 2021 STC-Innovation LTD (Author: Anton Mitrofanov)
import torch
import numpy as np
import os
import argparse
import logging
import itertools
from ltlm.pyutils.lattice_utils import oracle_path, topsort_lat
from ltlm.Tokenizer import WordTokenizer
from ltlm.datasets import LatsDataSet
from ltlm.pyutils.loggi... | 9,620 | 45.703883 | 131 | py |
kaldi | kaldi-master/egs/librispeech/s5/fairseq_ltlm/ltlm/datasets/LatsDataSet.py | # Copyright 2021 STC-Innovation LTD (Author: Anton Mitrofanov)
import torch.utils.data
import torch
import numpy as np
import os
import sys
from glob import glob
import pickle
from fairseq.data import FairseqDataset
from collections import Counter
import tqdm
import argparse
import time
import logging
from ltlm.pyuti... | 17,979 | 41.305882 | 144 | py |
kaldi | kaldi-master/egs/librispeech/s5/fairseq_ltlm/ltlm/tasks/rescoring_task.py | # Copyright 2021 STC-Innovation LTD (Author: Anton Mitrofanov)
import torch
import argparse
from collections import defaultdict
import os
from fairseq.tasks import FairseqTask, register_task
from ltlm.datasets import LatsOracleAlignDataSet, PerEpochWrapper
from ltlm.pyutils.data_utils import parse_lats_json
from lt... | 7,053 | 45.407895 | 130 | py |
DGM-MFCP | DGM-MFCP-main/DGM_MFCP.py | # -*- coding: utf-8 -*-
"""DGM_MFC.py
We construct a deep neural network to train the algorithm called Deep Galerkin Method (DGM).
The DGM is used to solve high dimensional PDE coming from the Hamilton-Jacobi-Bellman (HJB) equation
in Mean Field Control Problem (MFCP).
"""
import tensorflow as tf
"""# Model"""
# ... | 22,545 | 35.305958 | 117 | py |
visual-concepts | visual-concepts-master/run_mil.py | #!/usr/bin/env python
import h5py, math
import _init_paths
import os, sys
import sg_utils as utils
import preprocess
import coco_voc
from test_model import *
# import caffe
import argparse, pprint, sys
import numpy as np
from IPython.core.debugger import Tracer
def parse_args():
"""
Parse input arguments
"""
p... | 6,665 | 37.091429 | 117 | py |
visual-concepts | visual-concepts-master/_init_paths.py | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Set up paths."""
import os.path as osp
import sys
import platform
d... | 664 | 23.62963 | 58 | py |
visual-concepts | visual-concepts-master/test_model.py | import caffe
import numpy as np
import cv2
import sg_utils as utils
import cap_eval_utils
from IPython.core.debugger import Tracer
# import caffe
def load_model(prototxt_file, model_file, base_image_size, mean, vocab):
"""
Load the model from file. Includes pointers to the prototxt file,
caffemodel file name, ... | 7,703 | 39.761905 | 114 | py |
visual-concepts | visual-concepts-master/scripts/script_all.py | # Create directory, write splits and labels for training to be used for Caffe
mkdir output/v1
python run_mil.py --task compute_targets \
--train_dir output/v1/ --write_labels 1 --write_splits 1 \
--train_set train --val_set valid1 \
--vocab_file vocabs/vocab_train.pkl
# Command to launch the training
GLOG_logt... | 2,321 | 35.28125 | 77 | py |
cincer | cincer-main/main.py | import os, requests
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow import keras
from tensorflow.keras.callbacks import EarlyStopping
from influence.influence.influence_model import InfluenceModel
from sklearn.linear_model import LogisticRegression
from sklearn.metrics im... | 33,295 | 37.09611 | 124 | py |
cincer | cincer-main/negsup/utils.py | import pickle
import numpy as np
from tensorflow import keras
def set_bits(bits):
if bits == 32:
keras.backend.set_floatx('float32')
return np.float32
elif bits == 64:
keras.backend.set_floatx('float64')
return np.float64
else:
raise ValueError()
def load(path, **... | 510 | 19.44 | 43 | py |
cincer | cincer-main/negsup/datasets.py | import os, requests
import numpy as np
import pandas as pd
from scipy.spatial.distance import pdist, squareform
from copy import copy
from matplotlib import pyplot as plt
from sklearn.neighbors import BallTree
from tensorflow import keras
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import MinMa... | 18,342 | 35.108268 | 94 | py |
cincer | cincer-main/negsup/models.py | import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, Sequential
class KernelLogreg(Sequential):
def __init__(self, kwargs):
super(KernelLogreg, self).__init__(kwargs)
def call(self, inputs, **kwargs):
def fn(xi):
return tf.reshape(tf.matm... | 3,940 | 31.570248 | 84 | py |
lstm-syllabify | lstm-syllabify-master/neuralnets/BiLSTM.py | import numpy as np
import sys
import time
import os
import random
import logging
from .utils import try_tensorflow_import
try_tensorflow_import()
from tensorflow import keras
from tensorflow.keras.optimizers import Adam, Nadam, RMSprop, Adadelta, Adagrad, SGD
from tensorflow.keras.models import Model
from tensorflow... | 19,559 | 35.289425 | 127 | py |
lstm-syllabify | lstm-syllabify-master/neuralnets/keraslayers/ChainCRF.py | """
[Adapted from Philipp Gross
@ https://github.com/phipleg/keras/blob/crf/keras/layers/crf.py]
"""
from __future__ import absolute_import
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import backend as K
from tensorflow.keras import regularizers
from tensorflow.keras import constrai... | 12,606 | 34.512676 | 97 | py |
CMW-Net | CMW-Net-main/section7/Partial-Label_Learning/main.py | import os
import os.path
import torch
from torch.autograd import Variable
import torch.nn.functional as F
import torchvision.transforms as transforms
import argparse
import numpy as np
import time
from utils.utils_loss import partial_loss
from utils.models_v1 import mlp
from cifar_models import *
from datasets.mnist... | 23,398 | 42.983083 | 193 | py |
CMW-Net | CMW-Net-main/section7/Partial-Label_Learning/datasets/cifar100.py | from PIL import Image
import os
import os.path
import sys
import torch
import numpy as np
import pickle
import torch.utils.data as data
from utils.utils_algo import binarize_class, partialize, check_integrity, download_url
class cifar100(data.Dataset):
base_folder = 'cifar-100-python'
url = "http://www.cs.tor... | 5,653 | 33.901235 | 132 | py |
CMW-Net | CMW-Net-main/section7/Partial-Label_Learning/datasets/mnist_v2.py | from PIL import Image
import os
import os.path
import errno
import codecs
import numpy as np
import torch
import torch.utils.data as data
from utils.utils_algo import binarize_class, partialize
class mnist(data.Dataset):
"""`MNIST <http://yann.lecun.com/exdb/mnist/>`_ Dataset.
Args:
root (string): Roo... | 7,357 | 36.350254 | 130 | py |
CMW-Net | CMW-Net-main/section7/Partial-Label_Learning/datasets/fashion_v1.py | from PIL import Image
import os
import os.path
import errno
import codecs
import numpy as np
import torch
import torch.utils.data as data
from utils.utils_algo import binarize_class, partialize
class fashion(data.Dataset):
"""`Fashion-MNIST <https://github.com/zalandoresearch/fashion-mnist>`_ Dataset.
Args:
... | 7,057 | 36.147368 | 130 | py |
CMW-Net | CMW-Net-main/section7/Partial-Label_Learning/datasets/kmnist_v1.py | from PIL import Image
import os
import os.path
import errno
import codecs
import numpy as np
import torch
import torch.utils.data as data
from utils.utils_algo import binarize_class, partialize
class kmnist(data.Dataset):
"""`Kuzushiji-MNIST <https://github.com/rois-codh/kmnist>`_ Dataset.
Args:
root... | 6,981 | 35.747368 | 130 | py |
CMW-Net | CMW-Net-main/section7/Partial-Label_Learning/datasets/cifar10_v1.py | from PIL import Image
import os
import os.path
import sys
import torch
import numpy as np
import pickle
import torch.utils.data as data
from utils.utils_algo import binarize_class, partialize, check_integrity, download_url
class cifar10(data.Dataset):
"""`CIFAR10 <https://www.cs.toronto.edu/~kriz/cifar.html>`_ Da... | 6,456 | 37.664671 | 128 | py |
CMW-Net | CMW-Net-main/section7/Partial-Label_Learning/datasets/kmnist.py | from PIL import Image
import os
import os.path
import errno
import codecs
import numpy as np
import torch
import torch.utils.data as data
from utils.utils_algo import binarize_class, partialize
class kmnist(data.Dataset):
"""`Kuzushiji-MNIST <https://github.com/rois-codh/kmnist>`_ Dataset.
Args:
root... | 6,490 | 34.664835 | 128 | py |
CMW-Net | CMW-Net-main/section7/Partial-Label_Learning/datasets/cifar10.py | from PIL import Image
import os
import os.path
import sys
import torch
import numpy as np
import pickle
import torch.utils.data as data
from utils.utils_algo import binarize_class, partialize, check_integrity, download_url
class cifar10(data.Dataset):
"""`CIFAR10 <https://www.cs.toronto.edu/~kriz/cifar.html>`_ Da... | 6,484 | 36.923977 | 132 | py |
CMW-Net | CMW-Net-main/section7/Partial-Label_Learning/datasets/fashion.py | from PIL import Image
import os
import os.path
import errno
import codecs
import numpy as np
import torch
import torch.utils.data as data
from utils.utils_algo import binarize_class, partialize
class fashion(data.Dataset):
"""`Fashion-MNIST <https://github.com/zalandoresearch/fashion-mnist>`_ Dataset.
Args:
... | 6,558 | 34.84153 | 128 | py |
CMW-Net | CMW-Net-main/section7/Partial-Label_Learning/datasets/mnist_v1.py | from PIL import Image
import os
import os.path
import errno
import codecs
import numpy as np
import torch
import torch.utils.data as data
from utils.utils_algo import binarize_class, partialize
class mnist(data.Dataset):
"""`MNIST <http://yann.lecun.com/exdb/mnist/>`_ Dataset.
Args:
root (string): Roo... | 6,933 | 36.080214 | 130 | py |
CMW-Net | CMW-Net-main/section7/Partial-Label_Learning/datasets/cifar100_v1.py | from PIL import Image
import os
import os.path
import sys
import torch
import numpy as np
import pickle
import torch.utils.data as data
from utils.utils_algo import binarize_class, partialize, check_integrity, download_url
class cifar100(data.Dataset):
base_folder = 'cifar-100-python'
url = "http://www.cs.tor... | 5,626 | 34.613924 | 129 | py |
CMW-Net | CMW-Net-main/section7/Partial-Label_Learning/datasets/mnist.py | from PIL import Image
import os
import os.path
import errno
import codecs
import numpy as np
import torch
import torch.utils.data as data
from utils.utils_algo import binarize_class, partialize
class mnist(data.Dataset):
"""`MNIST <http://yann.lecun.com/exdb/mnist/>`_ Dataset.
Args:
root (string): Roo... | 6,440 | 34.783333 | 128 | py |
CMW-Net | CMW-Net-main/section7/Partial-Label_Learning/cifar_models/resnet_v1.py | from __future__ import absolute_import
import torch.nn as nn
import math
import torch
import torch.nn.functional as F
from torch.autograd import Variable
import torch.nn.init as init
def to_var(x, requires_grad=True):
if torch.cuda.is_available():
x = x.cuda()
return Variable(x, requires_grad=require... | 13,492 | 31.829684 | 141 | py |
CMW-Net | CMW-Net-main/section7/Partial-Label_Learning/cifar_models/resnet.py | from __future__ import absolute_import
'''ResNet for cifar dataset.
Ported form
https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
'''
import torch.nn as nn
import math
__all__ = ['resnet']
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d... | 4,592 | 28.06962 | 77 | py |
CMW-Net | CMW-Net-main/section7/Partial-Label_Learning/cifar_models/convnet.py | import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
__all__ = ['convnet']
'''ConvNet for cifar dataset.
Ported form
https://github.com/bhanML/Co-teaching/blob/master/model.py
'''
class convnet(nn.Module):
def __init__(self, input_channels=3, n_outputs=10, dropout_rate... | 2,710 | 32.8875 | 81 | py |
CMW-Net | CMW-Net-main/section7/Partial-Label_Learning/utils/utils_algo.py | import numpy as np
import torch
from sklearn.preprocessing import OneHotEncoder
import os
import hashlib
import errno
def binarize_class(y):
label = y.reshape(len(y), -1)
enc = OneHotEncoder(categories='auto')
enc.fit(label)
label = enc.transform(label).toarray().astype(np.float32)
label =... | 2,070 | 25.21519 | 69 | py |
CMW-Net | CMW-Net-main/section7/Partial-Label_Learning/utils/models_v1.py | import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
import math
from torch.autograd import Variable
import torch.nn.init as init
def to_var(x, requires_grad=True):
if torch.cuda.is_available():
x = x.cuda()
return Variable(x, requires_grad=requires_grad)
c... | 8,559 | 34.081967 | 105 | py |
CMW-Net | CMW-Net-main/section7/Partial-Label_Learning/utils/models.py | import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
class linear(nn.Module):
def __init__(self, n_inputs, n_outputs):
super(linear, self).__init__()
self.L1 = nn.Linear(n_inputs, n_outputs)
init.xavier_uniform_(self.L1.weight)
def forward(s... | 1,792 | 26.584615 | 68 | py |
CMW-Net | CMW-Net-main/section7/Partial-Label_Learning/utils/utils_loss.py | import torch
import torch.nn.functional as F
import numpy as np
def partial_loss(output1, target, true):
output = F.softmax(output1, dim=1)
l = target * torch.log(output)
loss = (-torch.sum(l)) / l.size(0)
revisedY = target.clone()
revisedY[revisedY > 0] = 1
revisedY = revisedY * output
... | 461 | 23.315789 | 87 | py |
CMW-Net | CMW-Net-main/section4/Class_Imbalance/LT1.py | import os
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
# os.environ["CUDA_VISIBLE_DEVICES"]="1"
import time
import argparse
import random
import copy
import torch
import torchvision
import numpy as np
import torch.nn.functional as F
from torch.autograd import Variable
import torchvision.transforms as transforms
from ... | 17,020 | 34.909283 | 131 | py |
CMW-Net | CMW-Net-main/section4/Class_Imbalance/resnet.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from torch.autograd import Variable
import torch.nn.init as init
def to_var(x, requires_grad=True):
if torch.cuda.is_available():
x = x.cuda()
return Variable(x, requires_grad=requires_grad)
class MetaModule(nn.Module):
... | 11,952 | 34.680597 | 120 | py |
CMW-Net | CMW-Net-main/section4/Class_Imbalance/data_utils.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision
import numpy as np
import copy
from PIL import Image
class CIFAR10CI(torchvision.da... | 5,307 | 31.564417 | 104 | py |
CMW-Net | CMW-Net-main/section4/Class_Imbalance/LT2.py | import os
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
# os.environ["CUDA_VISIBLE_DEVICES"]="1"
import time
import argparse
import random
import copy
import torch
import torchvision
import numpy as np
import torch.nn.functional as F
from torch.autograd import Variable
import torchvision.transforms as transforms
from ... | 16,992 | 34.850211 | 131 | py |
CMW-Net | CMW-Net-main/section4/Feature-independent_Label_Noise/PreResNet_MWN.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
def to_var(x, requires_grad=True):
if torch.cuda.is_available():
x = x.cuda()
return Variable(x, requires_grad=requires_grad)
class MetaModule(nn.Module):
# adopted from: Adrien Ecoffet https:... | 15,286 | 33.822323 | 141 | py |
CMW-Net | CMW-Net-main/section4/Feature-independent_Label_Noise/dataloader_cifar.py | from torch.utils.data import Dataset, DataLoader
import torchvision.transforms as transforms
import random
import numpy as np
from PIL import Image
import json
import os
import torch
import torch.nn.functional as F
def unpickle(file):
import _pickle as cPickle
with open(file, 'rb') as fo:
d... | 9,291 | 43.888889 | 211 | py |
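
The rows above all follow the schema in the header (repo, file, code, file_length, avg_line_length, max_line_length, extension_type). As a minimal sketch, assuming the table has been exported to a local file — the path `python_files.parquet` and the filter thresholds below are hypothetical placeholders, not part of the dataset — such a table could be loaded and filtered with pandas:

```python
# Minimal sketch: load a table with the schema above and filter it.
# The parquet path and the thresholds are hypothetical placeholders.
import pandas as pd

df = pd.read_parquet("python_files.parquet")

# Expected columns (every row in this split is a .py file).
assert list(df.columns) == [
    "repo", "file", "code", "file_length",
    "avg_line_length", "max_line_length", "extension_type",
]

# Example filter: keep shorter files with no extremely long lines.
subset = df[(df["file_length"] < 10_000) & (df["max_line_length"] < 120)]
print(subset[["repo", "file", "file_length"]].head())
```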