| repo (string, lengths 1–99) | file (string, lengths 13–215) | code (string, lengths 12–59.2M) | file_length (int64, 12–59.2M) | avg_line_length (float64, 3.82–1.48M) | max_line_length (int64, 12–2.51M) | extension_type (string, 1 class) |
|---|---|---|---|---|---|---|
phocnet | phocnet-master/examples/prediction_example.py | import caffe
import numpy as np
def main():
# This example is going to show you how you can use the API to predict
# PHOCs from a trained PHOCNet for your own word images.
# First we need to load the trained PHOCNet. We are going to use the trained
# PHOCNet supplied at
# http://patrec.cs.tu-dort... | 2,767 | 44.377049 | 85 | py |
phocnet | phocnet-master/src/phocnet/evaluation/cnn.py | '''
Created on Jul 10, 2016
@author: ssudholt
'''
import logging
import numpy as np
from skimage.transform import resize
from phocnet.evaluation.retrieval import map_from_feature_matrix
def net_output_for_word_image_list(phocnet, word_img_list,
min_img_width_height=-1,input_layer... | 3,594 | 44.506329 | 113 | py |
phocnet | phocnet-master/src/phocnet/evaluation/phocnet_evaluator.py | '''
Created on Aug 29, 2016
@author: ssudholt
'''
import logging
import os
import caffe
import numpy as np
from skimage.transform import resize
from phocnet.caffe.model_proto_generator import ModelProtoGenerator
from phocnet.io.xml_io import XMLReader
from phocnet.io.context_manager import Suppressor
from phocnet.a... | 12,724 | 51.80083 | 136 | py |
phocnet | phocnet-master/src/phocnet/training/phocnet_trainer.py | '''
Created on Aug 29, 2016
@author: ssudholt
'''
import logging
import os
import time
import caffe
import numpy as np
from skimage.transform import resize
from phocnet.attributes.phoc import build_phoc, unigrams_from_word_list,\
get_most_common_n_grams
from phocnet.caffe.model_proto_generator import ModelProtoG... | 24,351 | 53.970655 | 141 | py |
phocnet | phocnet-master/src/phocnet/caffe/solver_proto_generator.py | '''
Created on Jul 9, 2016
@author: ssudholt
'''
from caffe.proto import caffe_pb2
from google.protobuf.internal.containers import RepeatedScalarFieldContainer
def generate_solver_proto(**kwargs):
sp = caffe_pb2.SolverParameter()
for k,v in kwargs.iteritems():
if not hasattr(sp, k):
raise ... | 734 | 32.409091 | 95 | py |
phocnet | phocnet-master/src/phocnet/caffe/model_proto_generator.py | # pylint: disable=too-many-arguments
'''
Created on Jul 8, 2016
@author: ssudholt
'''
import logging
from caffe import NetSpec
from caffe import layers as L
from caffe import params as P
from caffe.io import caffe_pb2
import argparse
class ModelProtoGenerator(object):
'''
Class for generating Caffe CNN model... | 9,101 | 49.566667 | 129 | py |
phocnet | phocnet-master/src/phocnet/caffe/lmdb_creator.py | '''
Created on Feb 18, 2016
@author: ssudholt
'''
import os
import shutil
import logging
import numpy as np
import lmdb
import caffe.io
# from patrec.serialization.list_io import LineListIO
class CaffeLMDBCreator(object):
def __init__(self):
'''
LMDB creator can create a single LMDB for single l... | 9,744 | 48.217172 | 139 | py |
pLogicNet | pLogicNet-master/kge/dataloader.py | #!/usr/bin/python3
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import torch
from torch.utils.data import Dataset
class TrainDataset(Dataset):
def __init__(self, triples, nentity, nrelation, negative_sample_size, mode):
s... | 6,670 | 35.255435 | 107 | py |
pLogicNet | pLogicNet-master/kge/model.py | #!/usr/bin/python3
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from sklearn.metrics import average_precision_score
from torch.utils.data import Data... | 18,193 | 37.222689 | 186 | py |
pLogicNet | pLogicNet-master/kge/run.py | #!/usr/bin/python3
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import json
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader
from model import KGEModel
from dataloader impo... | 18,047 | 39.832579 | 113 | py |
GigaSpeech | GigaSpeech-main/utils/extract_subset_segments.py | #!/usr/bin/env python3
# coding=utf8
# Copyright 2022 Jiayu DU
'''
This tool is used to extract supervised segments from GigaSpeech,
segments are saved in .wav format, supervisions are saved in a simple .tsv file:
--- exampler tsv begin ---
ID AUDIO BEGIN DURATION TEXT
POD1000000004_S0000017 audio/POD1000000... | 3,742 | 39.247312 | 127 | py |
MIED | MIED-main/mied/validators/metrics.py | '''
Metrics to evaluate samples:
1. With ground truth samples, we can compute
a. Wasserstein distance between mu and mu^*
b. KL(mu^* || mu)
2. With access to ground truth density (which is always the case), we can compute
a. KSD, directly applicable
b. KL(mu || mu^*)
'''
import torch
import numpy as np... | 5,968 | 36.074534 | 83 | py |
MIED | MIED-main/mied/validators/particle.py | import torch
import numpy as np
import math
from mied.utils.h5_helpers import save_dict_h5
from mied.utils.batch_eval import batch_eval_index
from mied.validators.metrics import compute_metric
class ParticleValidator:
def __init__(self, *,
problem):
self.problem = problem
self.devi... | 3,751 | 32.20354 | 91 | py |
MIED | MIED-main/mied/solvers/ksdd.py | import torch
import numpy as np
from mied.utils.batch_jacobian import compute_jacobian
from mied.utils.kernels import GaussianKernel
from mied.solvers.particle_base import ParticleBasedSolver
def compute_ksd(X, grad_log_p, kernel):
'''
:param X: (B, D)
:param grad_log_p: (B, D)
:param kernel: an insta... | 2,133 | 30.382353 | 75 | py |
MIED | MIED-main/mied/solvers/lmc.py | import torch
import numpy as np
from mied.solvers.particle_base import ParticleBasedSolver
from mied.utils.batch_jacobian import compute_jacobian
from mied.solvers.mirror_maps import BoxMap, BoxEntropicMap
class LMC(ParticleBasedSolver):
def __init__(self,
lmc_lr,
mirror_map,
... | 1,405 | 32.47619 | 90 | py |
MIED | MIED-main/mied/solvers/no_op_projector.py | import torch
from mied.solvers.projector_base import ProjectorBase
'''
Handle multiple constraints by projecting the gradients using
Dystra algorithm.
'''
class NoOpProjector(ProjectorBase):
def __init__(self):
pass
def step(self, X, update, problem):
return update
def get_violation(sel... | 343 | 17.105263 | 61 | py |
MIED | MIED-main/mied/solvers/particle_base.py | from abc import ABC, abstractmethod
import torch
import numpy as np
from pathlib import Path
from tqdm import trange
from mied.utils.batch_hessian import compute_hessian
class ParticleBasedSolver(ABC):
def __init__(self, *,
problem,
projector,
num_particle,
... | 7,284 | 31.092511 | 86 | py |
MIED | MIED-main/mied/solvers/dynamic_barrier.py | import torch
from mied.solvers.projector_base import ProjectorBase
from mied.utils.batch_jacobian import compute_jacobian
from mied.utils.proj_polyhedra import proj_polyhedra
'''
Handle multiple constraints by projecting the gradients using
Dystra algorithm.
'''
class DynamicBarrier(ProjectorBase):
def __init__(se... | 1,597 | 29.150943 | 73 | py |
MIED | MIED-main/mied/solvers/mirror_maps.py | from abc import ABC, abstractmethod
import torch
def safe_log(x):
# return torch.log(torch.maximum(1e-32, x))
# return torch.log(torch.maximum(1e-8, x))
return torch.log(x + 1e-32)
class MirrorMapBase(ABC):
@abstractmethod
def phi(self, theta):
pass
@abstractmethod
def nabla_phi(... | 1,416 | 22.616667 | 84 | py |
MIED | MIED-main/mied/solvers/svgd.py | import torch
import numpy as np
from mied.solvers.particle_base import ParticleBasedSolver
from mied.utils.batch_jacobian import compute_jacobian
def svgd_update(P, grad_log_p, kernel='gaussian', kernel_h=-1,
riesz_s=-1, riesz_eps=1e-4):
'''
SVGD update with Gaussian kernel.
:param P: (B... | 2,656 | 29.193182 | 78 | py |
MIED | MIED-main/mied/solvers/projector_base.py | from abc import ABC, abstractmethod
import torch
class ProjectorBase:
def __init__(self):
pass
@abstractmethod
def step(self, particles, update_grad, problem, optimizer):
'''
Update particles given update directions update_grad while projecting
to the constraints given by ... | 422 | 19.142857 | 77 | py |
MIED | MIED-main/mied/solvers/mied.py | import torch
import numpy as np
import math
from mied.solvers.particle_base import ParticleBasedSolver
def log_exp_diff(a, b):
'''
Compute log|e^a - e^b| * sign(a-b)
:param a, b: torch scalars
'''
if a > b:
return a + torch.log(1 - torch.exp(b - a))
else:
return -(b + torch.log... | 4,561 | 29.824324 | 88 | py |
MIED | MIED-main/mied/solvers/ipd.py | import torch
import numpy as np
from mied.solvers.particle_base import ParticleBasedSolver
from mied.utils.batch_jacobian import compute_jacobian
'''
Independent particle descent, a dumb baseline.
'''
class IPD(ParticleBasedSolver):
def __init__(self,
**kwargs):
super().__init__(**kwargs... | 798 | 25.633333 | 77 | py |
MIED | MIED-main/mied/problems/problem_base.py | from abc import ABC, abstractmethod
import torch
from mied.utils.batch_jacobian import compute_jacobian
class ProblemBase(ABC):
def __init__(self, *,
device,
in_dim):
'''
A problem describes the sampling problem with unnormalized density
p(x) and constraint... | 1,956 | 20.988764 | 82 | py |
MIED | MIED-main/mied/problems/logistics.py | import torch
import torch.distributions
import torch.nn.functional as F
from torch.utils.data import TensorDataset, DataLoader
import numpy as np
from tqdm import tqdm
import scipy.io
from mied.problems.problem_base import ProblemBase
class BayesianLogistics(ProblemBase):
def __init__(self, *,
dev... | 5,753 | 32.649123 | 83 | py |
MIED | MIED-main/mied/problems/analytical_problems.py | import torch
import numpy as np
import math
from abc import ABC, abstractmethod
'''
bbox corresponds to the dimension of the variable (=dim) which is
not the same as the intrinsic dimension.
'''
def sample_simplex(dim, batch_size, device):
samples = torch.from_numpy(np.random.dirichlet(
torch.ones([dim]) ... | 17,849 | 31.046679 | 109 | py |
MIED | MIED-main/mied/problems/analytical.py | import torch
from mied.problems.problem_base import ProblemBase
class Analytical(ProblemBase):
def __init__(self, *,
bbox,
embed_dim,
log_p_fn,
prior_sample_fn,
eq_fn=None,
ineq_fn=None,
reparam_f... | 1,621 | 23.208955 | 63 | py |
MIED | MIED-main/mied/problems/fairness_bnn.py | import torch
from torch.distributions import Normal
import torch.nn.functional as F
import numpy as np
import random
from mied.problems.problem_base import ProblemBase
from mied.utils.adult_loader import load_data
# Using the same setup as https://proceedings.neurips.cc/paper/2021/hash/c61aed648da48aa3893fb3eaadd88a7... | 7,599 | 38.175258 | 122 | py |
MIED | MIED-main/mied/utils/kernels.py | from abc import ABC, abstractmethod
import torch
from mied.utils.batch_jacobian import compute_jacobian
class KernelBase(ABC):
@abstractmethod
def eval(self, X, Y):
'''
:param X: (B, D)
:param Y: (B, D)
:return: (B,)
'''
pass
@abstractmethod
def grad_1(... | 1,184 | 19.431034 | 79 | py |
MIED | MIED-main/mied/utils/batch_jacobian.py | import torch
def compute_jacobian(outputs, inputs,
create_graph=True, retain_graph=True):
'''
Compute Jacobian matrices in batch.
:param outputs: (..., D1)
:param inputs: (..., D2)
:returns: (..., D1, D2), computed Jacobian
'''
J = torch.cat([
torch.autograd.gr... | 631 | 27.727273 | 77 | py |
MIED | MIED-main/mied/utils/batch_hessian.py | import torch
from mied.utils.batch_jacobian import compute_jacobian
def compute_hessian(func, inputs):
'''
Compute Hessianmatrices in batch.
:param func: (B, D) -> (B,)
:param inputs: (B, D)
:returns: (B, D, D)
'''
outputs = func(inputs) # (B,)
grad = compute_jacobian(outputs.unsqueeze... | 420 | 21.157895 | 79 | py |
MIED | MIED-main/mied/utils/random.py | import torch
import numpy as np
import random
def seed_all(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.cuda.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
| 280 | 20.615385 | 42 | py |
MIED | MIED-main/mied/utils/proj_polyhedra.py | import torch
def proj_halfspace(p, c, y):
'''
Project p to halfspace defined by {x: c^T x <= y}.
:param p: (B, D)
:param c: (B, D)
:param y: (B,)
:return: (B, D), projected points
'''
norm = torch.norm(c, dim=-1) + 1e-8 # (B,)
c = c / norm.unsqueeze(-1) # (B, D)
y = y / norm #... | 3,305 | 32.393939 | 94 | py |
MIED | MIED-main/mied/utils/h5_helpers.py | import torch
import numpy as np
import h5py
from pathlib import Path
def save_dict_h5(save_dict, h5_path, create_dir=False):
def recurse(remain_dict, parent_handle):
for k, v in remain_dict.items():
if isinstance(v, dict):
child_handle = parent_handle.create_group(k)
... | 893 | 33.384615 | 63 | py |
MIED | MIED-main/mied/utils/ec.py | import torch
import argparse
import yaml
import copy
from pathlib import Path
import shutil
from datetime import datetime
from uuid import uuid4
from collections import namedtuple
import wandb
from mied.utils.shortname import \
convert_method_cls_to_str, convert_method_str_to_cls, \
convert_projector_cls_to_st... | 11,026 | 35.392739 | 82 | py |
MIED | MIED-main/mied/utils/batch_eval.py | import torch
from tqdm import tqdm
def batch_eval_index(f, total_count, batch_size=1024,
result_device=torch.device('cpu'),
detach=True,
no_tqdm=False):
'''
Batch evaluate f.
:param f: function to be evalutated. It should take in (B,) of indic... | 1,050 | 32.903226 | 76 | py |
MIED | MIED-main/tests/analytical/run.py | import torch
import argparse
from pathlib import Path
import math
import wandb
import matplotlib.pyplot as plt
from mied.validators.particle import ParticleValidator
from mied.utils.random import seed_all
from mied.utils.ec import ExperimentCoordinator
from mied.problems.analytical_problems import create_problem
from... | 4,651 | 33.977444 | 77 | py |
MIED | MIED-main/tests/fairness_bnn/run.py | import torch
import argparse
from pathlib import Path
import math
import h5py
import wandb
from mied.validators.particle import ParticleValidator
from mied.utils.random import seed_all
from mied.utils.ec import ExperimentCoordinator
from mied.utils.h5_helpers import save_dict_h5
from mied.problems.fairness_bnn import F... | 2,025 | 27.138889 | 60 | py |
MIED | MIED-main/tests/logistics/run.py | import torch
import argparse
from pathlib import Path
import math
import h5py
import wandb
from mied.validators.particle import ParticleValidator
from mied.utils.random import seed_all
from mied.utils.ec import ExperimentCoordinator
from mied.utils.h5_helpers import save_dict_h5
from mied.problems.logistics import Baye... | 4,112 | 32.991736 | 83 | py |
noise-sgd-adam-sign | noise-sgd-adam-sign-main/explib/explib/experiment.py | import time
import torch
import math
import os
import random
import datetime
from pathlib import Path
import numpy as np
from explib import config
from explib.expmaker.experiment_defs import make_wuuid, exp_dict_to_str
from . import logging, problem
class Experiment:
def __init__(
self,
exp_dict,... | 5,622 | 34.815287 | 88 | py |
noise-sgd-adam-sign | noise-sgd-adam-sign-main/explib/explib/util.py | import torch
import torch.nn as nn
def get_grads(model):
res = []
for p in model.parameters():
if p.requires_grad:
res.append(p.grad.view(-1))
grad_flat = torch.cat(res)
return grad_flat
INIT_STD = 0.02
PROJ_INIT_STD = 0.01
def init_weight(weight):
nn.init.normal_(weight, 0... | 2,310 | 30.22973 | 73 | py |
noise-sgd-adam-sign | noise-sgd-adam-sign-main/explib/explib/dataset/language_loader.py | import os, sys
import glob
from collections import Counter, OrderedDict
import numpy as np
import torch
import subprocess
# Code copied from https://github.com/kimiyoung/transformer-xl
from explib import config
class Vocab(object):
def __init__(
self,
special=[],
min_freq=0,
max_... | 17,819 | 31.459016 | 88 | py |
noise-sgd-adam-sign | noise-sgd-adam-sign-main/explib/explib/dataset/glue_loader.py | import os
import random
from explib import config
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
PretrainedConfig,
)
MAX_LENGTH = 128
EVAL_BAS... | 6,484 | 34.828729 | 117 | py |
noise-sgd-adam-sign | noise-sgd-adam-sign-main/explib/explib/dataset/torchvision_loader.py | import os
import torch
import torchvision
from explib import config
from torchvision import transforms
from torchvision.datasets import MNIST, USPS
def torchvision_loader(dataset_name, batch_size, drop_last=False, shuffle=True):
if dataset_name == "mnist":
loader = MNIST
elif dataset_name == "usps":
... | 1,511 | 26.490909 | 80 | py |
noise-sgd-adam-sign | noise-sgd-adam-sign-main/explib/explib/dataset/squad_loader.py | import tokenize
import datasets
import os
from datasets import load_dataset
from accelerate import Accelerator
from explib import config
from transformers import (
CONFIG_MAPPING,
MODEL_MAPPING,
AutoTokenizer,
DataCollatorWithPadding,
)
from torch.utils.data.dataloader import DataLoader
import numpy as... | 11,801 | 38.209302 | 118 | py |
noise-sgd-adam-sign | noise-sgd-adam-sign-main/explib/explib/dataset/cifar_loader.py | import os
import torch
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from explib import config
def cifar_loader(
batch_size,
load_100=False,
drop_last=False,
fake_full_batch_mode=False,
shuffle=True,
):
data_class = "CIFAR100" if load_100 else "CIFAR10"
... | 1,561 | 23.40625 | 78 | py |
noise-sgd-adam-sign | noise-sgd-adam-sign-main/explib/explib/dataset/__init__.py | """Datasets.
General interface to load a dataset
"""
import os
from pathlib import Path
from explib import config
from .cifar_loader import cifar_loader
from .glue_loader import glue_loader
from .language_loader import ptb_loader, wikitext2_loader
from .squad_loader import squad_loader
from .torchvision_loader impor... | 2,805 | 26.242718 | 79 | py |
noise-sgd-adam-sign | noise-sgd-adam-sign-main/explib/explib/optim/signum.py | import torch
from torch.optim import Optimizer
class Signum(Optimizer):
r"""
Code taken from https://github.com/jiaweizzhao/Signum_pytorch/blob/master/Example/signum.py
Implements Signum optimizer that takes the sign of gradient or momentum.
See details in the original paper at:https://arxiv.org/abs/... | 3,282 | 36.735632 | 95 | py |
noise-sgd-adam-sign | noise-sgd-adam-sign-main/explib/explib/optim/normalized_gd.py | import torch
from torch import Tensor
from torch.optim.optimizer import Optimizer, required
from torch.nn.utils import parameters_to_vector as p2v
from typing import List, Optional
class CopyOfSGD(Optimizer):
def __init__(
self,
params,
lr=required,
momentum=0,
dampening=0,... | 7,502 | 30.004132 | 101 | py |
noise-sgd-adam-sign | noise-sgd-adam-sign-main/explib/explib/optim/modified_adam.py | from torch.optim import Optimizer
import math
import torch
from torch import Tensor
from typing import List, Optional
def f_modifiedadam(
params: List[Tensor],
grads: List[Tensor],
exp_avgs: List[Tensor],
exp_avg_sqs: List[Tensor],
max_exp_avg_sqs: List[Tensor],
state_steps: List[int],
*,
... | 6,017 | 34.192982 | 104 | py |
noise-sgd-adam-sign | noise-sgd-adam-sign-main/explib/explib/optim/__init__.py | """Optimizers
Generic interface to build optimizers by name,
possibly interfacing with pytorch
"""
import json
import torch
from .signum import Signum
from .modified_adam import ModifiedAdam
from .normalized_gd import (
PlainSGD,
NormalizedSGD,
BlockNormalizedSGD,
SignSGD,
RescaledSignDescent,
)
... | 2,628 | 24.77451 | 87 | py |
noise-sgd-adam-sign | noise-sgd-adam-sign-main/explib/explib/optim/clipped_sgd.py | import itertools
import torch
from torch import Tensor
from torch.optim import SGD
from torch.optim.optimizer import Optimizer, required
from torch.nn.utils import parameters_to_vector as p2v
from typing import List, Optional
class ClippedGD(SGD):
def __init__(
self,
params,
lr=required,
... | 1,091 | 24.395349 | 94 | py |
noise-sgd-adam-sign | noise-sgd-adam-sign-main/explib/explib/problem/problem.py | import torch
from torch.nn.utils import parameters_to_vector as p2v
from abc import ABCMeta, abstractmethod
from explib import config
from ..util import get_grads, enable_running_stats, disable_running_stats
import os
import numpy as np
from pathlib import Path
import csv
from ..dataset import *
class Problem(metacla... | 6,790 | 32.78607 | 84 | py |
noise-sgd-adam-sign | noise-sgd-adam-sign-main/explib/explib/problem/bert_squad_prob.py | import csv
import torch
from accelerate import Accelerator
from datasets import load_metric
from .. import dataset, model, optim
from .problem import Problem
class BertSquadProb(Problem):
def __init__(self, exp_dict):
super().__init__(exp_dict)
(
self.train_dataloader,
s... | 4,505 | 31.185714 | 87 | py |
noise-sgd-adam-sign | noise-sgd-adam-sign-main/explib/explib/problem/image_prob.py | import torch
import torch.nn.functional as F
from .. import dataset, model, optim
from .problem import Problem
class ImageProb(Problem):
def __init__(self, exp_dict):
super().__init__(exp_dict)
self.train_dataloader, self.valid_dataloader = dataset.init(
self.dataset_name,
... | 2,823 | 29.042553 | 86 | py |
noise-sgd-adam-sign | noise-sgd-adam-sign-main/explib/explib/problem/simple_prob.py | from .problem import Problem
from .. import dataset, model, optim
import torch
class SimpleProb(Problem):
def __init__(self, exp_dict):
super().__init__(exp_dict)
self.train_dataloader, self.valid_dataloader = dataset.init(
self.dataset_name,
self.batch_size,
s... | 2,272 | 28.141026 | 86 | py |
noise-sgd-adam-sign | noise-sgd-adam-sign-main/explib/explib/problem/transformer_prob.py | import csv
import math
import torch
from .. import dataset, model, optim
from .problem import Problem
class TransformerProb(Problem):
def __init__(self, exp_dict):
super().__init__(exp_dict)
init_outputs = dataset.init(
self.dataset_name,
self.batch_size,
sel... | 4,995 | 31.025641 | 86 | py |
noise-sgd-adam-sign | noise-sgd-adam-sign-main/explib/explib/model/full_connected.py | import torch
from torch import nn
import copy
class FullyConnected(nn.Module):
def __init__(self, input_dim=3 * 32 * 32, width=100, depth=3, num_classes=10):
super(FullyConnected, self).__init__()
self.input_dim = input_dim
self.width = width
self.depth = depth
self.num_cla... | 935 | 26.529412 | 82 | py |
noise-sgd-adam-sign | noise-sgd-adam-sign-main/explib/explib/model/transformer_xl.py | import math
import functools
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
class PositionalEmbedding(nn.Module):
def __init__(self, demb):
super(PositionalEmbedding, self).__init__()
self.demb = demb
inv_freq = 1 / (10000 ** (torch.arange(0.0, de... | 38,100 | 33.356177 | 119 | py |
noise-sgd-adam-sign | noise-sgd-adam-sign-main/explib/explib/model/resnet.py | import torchvision.models as models
def getResNet(size, pretrained=False):
if size == 50:
return models.resnet50(pretrained=pretrained)
elif size == 34:
return models.resnet34(pretrained=pretrained)
elif size == 101:
return models.resnet101(pretrained=pretrained)
elif size == 1... | 377 | 28.076923 | 54 | py |
noise-sgd-adam-sign | noise-sgd-adam-sign-main/explib/explib/model/bert_glue.py | import os
import random
from explib import config
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
PretrainedConfig,
)
def get_bert_glue(model_... | 1,138 | 28.205128 | 77 | py |
noise-sgd-adam-sign | noise-sgd-adam-sign-main/explib/explib/model/transformer_encoder.py | """
Simple transformer architecture used as introduction by the pytorch team
https://pytorch.org/tutorials/beginner/transformer_tutorial.html
Version used
https://github.com/pytorch/tutorials/blob/a981886fd8f1793ac5808b26e75dd50b788eb4e5/beginner_source/transformer_tutorial.py
Code covered by
See pytorch_
Copyright ... | 2,568 | 33.253333 | 122 | py |
noise-sgd-adam-sign | noise-sgd-adam-sign-main/explib/explib/model/letnet5.py | import torch
from torch import nn
class LeNet5(nn.Module):
def __init__(self, n_classes, in_channels=3):
super(LeNet5, self).__init__()
self.feature_extractor = nn.Sequential(
nn.Conv2d(in_channels=in_channels, out_channels=6, kernel_size=5, stride=1),
nn.Tanh(),
... | 961 | 30.032258 | 88 | py |
noise-sgd-adam-sign | noise-sgd-adam-sign-main/explib/explib/model/bert_base_pretrained.py | from datasets import load_metric
import numpy as np
from typing import Optional, Tuple
import json
import collections
import os
import torch
from transformers import (
AutoConfig,
AutoModelForQuestionAnswering,
EvalPrediction,
)
from .. import logging
def get_bert_base_pretrained():
config = AutoConfi... | 18,940 | 39.997835 | 119 | py |
noise-sgd-adam-sign | noise-sgd-adam-sign-main/explib/explib/model/linear_model.py | import torch
class LinearModel(torch.nn.Module):
def __init__(self, inputSize, outputSize):
super(LinearModel, self).__init__()
self.linear = torch.nn.Linear(inputSize, outputSize)
def forward(self, X):
out = self.linear(X)
return out
| 278 | 22.25 | 60 | py |
rl-perturbed-reward | rl-perturbed-reward-master/gym-control/naf_pendulum.py | import argparse
import collections
import pandas
import numpy as np
import os
import gym
from keras.models import Sequential, Model
from keras.layers import Dense, Activation, Flatten, Input, Concatenate
from keras.optimizers import Adam
import tensorflow as tf
from rl.agents import NAFAgent
from rl.memory import Seq... | 6,696 | 43.059211 | 122 | py |
rl-perturbed-reward | rl-perturbed-reward-master/gym-control/dqn_cartpole.py | import argparse
import collections
import pandas
import numpy as np
import os
import gym
from keras.layers import Activation, Dense, Flatten
from keras.models import Sequential
from keras.optimizers import Adam
import tensorflow as tf
from rl.agents.dqn import DQNAgent
from rl.core import Processor
from rl.memory imp... | 6,113 | 42.056338 | 133 | py |
rl-perturbed-reward | rl-perturbed-reward-master/gym-control/duel_dqn_cartpole.py | import argparse
import collections
import pandas
import numpy as np
import os
import gym
from keras.layers import Activation, Dense, Flatten
from keras.models import Sequential
from keras.optimizers import Adam
import tensorflow as tf
from rl.agents.dqn import DQNAgent
from rl.core import Processor
from rl.memory imp... | 6,413 | 43.541667 | 138 | py |
rl-perturbed-reward | rl-perturbed-reward-master/gym-control/sarsa_cartpole.py | import argparse
import collections
import pandas
import numpy as np
import os
import gym
from keras.layers import Activation, Dense, Flatten
from keras.models import Sequential
from keras.optimizers import Adam
import tensorflow as tf
from rl.agents import SARSAAgent
from rl.core import Processor
from rl.policy impor... | 5,869 | 39.482759 | 137 | py |
rl-perturbed-reward | rl-perturbed-reward-master/gym-control/ddpg_pendulum.py | import argparse
import pandas
import numpy as np
import os
import gym
from keras.models import Sequential, Model
from keras.layers import Dense, Activation, Flatten, Input, Concatenate
from keras.optimizers import Adam
import tensorflow as tf
from rl.agents import DDPGAgent
from rl.core import Processor
from rl.memor... | 6,589 | 44.763889 | 124 | py |
rl-perturbed-reward | rl-perturbed-reward-master/gym-control/cem_cartpole.py | import argparse
import collections
import pandas
import numpy as np
import os
import gym
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten
import tensorflow as tf
from rl.agents.cem import CEMAgent
from rl.memory import EpisodeParameterMemory
from noise_estimator import CartpoleP... | 5,860 | 39.42069 | 122 | py |
rl-perturbed-reward | rl-perturbed-reward-master/gym-control/rl/callbacks.py | from __future__ import division
from __future__ import print_function
import warnings
import timeit
import json
from tempfile import mkdtemp
import numpy as np
from keras import __version__ as KERAS_VERSION
from keras.callbacks import Callback as KerasCallback, CallbackList as KerasCallbackList
from keras.utils.gener... | 16,229 | 40.829897 | 423 | py |
rl-perturbed-reward | rl-perturbed-reward-master/gym-control/rl/core.py | # -*- coding: utf-8 -*-
import warnings
from copy import deepcopy
import numpy as np
from keras.callbacks import History
from rl.callbacks import (
CallbackList,
TestLogger,
TrainEpisodeLogger,
TrainIntervalLogger,
Visualizer
)
class Agent(object):
"""Abstract base class for all implemented ... | 29,790 | 41.018336 | 202 | py |
rl-perturbed-reward | rl-perturbed-reward-master/gym-control/rl/util.py | import numpy as np
from keras.models import model_from_config, Sequential, Model, model_from_config
import keras.optimizers as optimizers
import keras.backend as K
def clone_model(model, custom_objects={}):
# Requires Keras 1.0.7 since get_config has breaking changes.
config = {
'class_name': model._... | 4,476 | 32.410448 | 116 | py |
rl-perturbed-reward | rl-perturbed-reward-master/gym-control/rl/agents/ddpg.py | from __future__ import division
from collections import deque
import os
import warnings
import numpy as np
import keras.backend as K
import keras.optimizers as optimizers
from rl.core import Agent
from rl.random import OrnsteinUhlenbeckProcess
from rl.util import *
def mean_q(y_true, y_pred):
return K.mean(K.ma... | 14,524 | 44.820189 | 195 | py |
rl-perturbed-reward | rl-perturbed-reward-master/gym-control/rl/agents/sarsa.py | import collections
import numpy as np
from keras.callbacks import History
from keras.models import Model
from keras.layers import Input, Lambda
import keras.backend as K
from rl.core import Agent
from rl.agents.dqn import mean_q
from rl.util import huber_loss
from rl.policy import EpsGreedyQPolicy, GreedyQPolicy
fro... | 9,668 | 40.320513 | 121 | py |
rl-perturbed-reward | rl-perturbed-reward-master/gym-control/rl/agents/dqn.py | from __future__ import division
import warnings
import keras.backend as K
from keras.models import Model
from keras.layers import Lambda, Input, Layer, Dense
from rl.core import Agent
from rl.policy import EpsGreedyQPolicy, GreedyQPolicy
from rl.util import *
def mean_q(y_true, y_pred):
return K.mean(K.max(y_pr... | 33,631 | 44.204301 | 250 | py |
rl-perturbed-reward | rl-perturbed-reward-master/gym-control/rl/agents/cem.py | from __future__ import division
from collections import deque
from copy import deepcopy
import numpy as np
import keras.backend as K
from keras.models import Model
from rl.core import Agent
from rl.util import *
class CEMAgent(Agent):
"""Write me
"""
def __init__(self, model, nb_actions, memory, batch_si... | 6,679 | 36.740113 | 136 | py |
text-classification-cnn-rnn | text-classification-cnn-rnn-master/predict.py | # coding: utf-8
from __future__ import print_function
import os
import tensorflow as tf
import tensorflow.contrib.keras as kr
from cnn_model import TCNNConfig, TextCNN
from data.cnews_loader import read_category, read_vocab
try:
bool(type(unicode))
except NameError:
unicode = str
base_dir = 'data/cnews'
vo... | 1,694 | 28.736842 | 104 | py |
text-classification-cnn-rnn | text-classification-cnn-rnn-master/data/cnews_loader.py | # coding: utf-8
import sys
from collections import Counter
import numpy as np
import tensorflow.keras as kr
if sys.version_info[0] > 2:
is_py3 = True
else:
reload(sys)
sys.setdefaultencoding("utf-8")
is_py3 = False
def native_word(word, encoding='utf-8'):
"""如果在python2下面使用python3训练的模型,可考虑调用此函数转... | 3,386 | 25.255814 | 92 | py |
alibi-detect | alibi-detect-master/setup.py | from setuptools import find_packages, setup
def readme():
with open("README.md", encoding="utf-8") as f:
return f.read()
# read version file
exec(open("alibi_detect/version.py").read())
extras_require = {
"prophet": [
"prophet>=1.1.0, <2.0.0",
],
"torch": [
"torch>=1.7.0, <1... | 2,931 | 33.494118 | 118 | py |
alibi-detect | alibi-detect-master/testing/test_notebooks.py | """
This script is an example of using `jupytext` to execute notebooks for testing instead of relying on `nbmake`
plugin. This approach may be more flexible if our requirements change in the future.
"""
import glob
from pathlib import Path
import shutil
import pytest
from jupytext.cli import jupytext
try:
from fb... | 2,694 | 38.632353 | 109 | py |
alibi-detect | alibi-detect-master/alibi_detect/od/base.py | from alibi_detect.utils.missing_optional_dependency import import_optional
from typing import Union
from typing_extensions import Literal, Protocol, runtime_checkable
# Use Protocols instead of base classes for the backend associated objects. This is a bit more flexible and allows us to
# avoid the torch/tensorflow ... | 2,818 | 36.092105 | 120 | py |
alibi-detect | alibi-detect-master/alibi_detect/od/vaegmm.py | import logging
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from typing import Callable, Dict, Tuple
from alibi_detect.models.tensorflow.autoencoder import VAEGMM, eucl_cosim_features
from alibi_detect.models.tensorflow.gmm import gmm_energy, gmm_params
from alibi_detect.models.tensor... | 9,744 | 37.98 | 107 | py |
alibi-detect | alibi-detect-master/alibi_detect/od/_svm.py | from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple, Union
import numpy as np
from alibi_detect.base import (BaseDetector, FitMixin, ThresholdMixin,
outlier_prediction_dict)
from alibi_detect.exceptions import _catch_error as catch_error
from alibi_detect.od.pytorch import SgdS... | 10,687 | 41.923695 | 120 | py |
alibi-detect | alibi-detect-master/alibi_detect/od/_lof.py | from typing import Callable, Union, Optional, Dict, Any, List, Tuple
from typing import TYPE_CHECKING
from typing_extensions import Literal
import numpy as np
from alibi_detect.base import outlier_prediction_dict
from alibi_detect.exceptions import _catch_error as catch_error
from alibi_detect.od.base import Transfor... | 9,133 | 40.899083 | 119 | py |
alibi-detect | alibi-detect-master/alibi_detect/od/seq2seq.py | import logging
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Dense, InputLayer
from typing import Dict, Tuple, Union
from alibi_detect.models.tensorflow.autoencoder import Seq2Seq, EncoderLSTM, DecoderLSTM
from alibi_detect.models.tensorflow.trainer import trainer
from alibi_detect.base... | 13,595 | 40.075529 | 119 | py |
alibi-detect | alibi-detect-master/alibi_detect/od/_mahalanobis.py | from typing import Union, Optional, Dict, Any
from typing import TYPE_CHECKING
from alibi_detect.exceptions import _catch_error as catch_error
from typing_extensions import Literal
import numpy as np
from alibi_detect.base import BaseDetector, FitMixin, ThresholdMixin, outlier_prediction_dict
from alibi_detect.od.pyt... | 6,935 | 37.966292 | 120 | py |
alibi-detect | alibi-detect-master/alibi_detect/od/vae.py | import logging
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from typing import Dict, Tuple
from alibi_detect.models.tensorflow.autoencoder import VAE
from alibi_detect.models.tensorflow.trainer import trainer
from alibi_detect.models.tensorflow.losses import elbo
from alibi_detect.bas... | 11,444 | 37.15 | 112 | py |
alibi-detect | alibi-detect-master/alibi_detect/od/_pca.py | from typing import Union, Optional, Callable, Dict, Any
from typing import TYPE_CHECKING
from typing_extensions import Literal
import numpy as np
from alibi_detect.base import outlier_prediction_dict
from alibi_detect.base import BaseDetector, ThresholdMixin, FitMixin
from alibi_detect.od.pytorch import KernelPCATorc... | 8,089 | 37.160377 | 119 | py |
alibi-detect | alibi-detect-master/alibi_detect/od/_knn.py | from typing import Callable, Union, Optional, Dict, Any, List, Tuple
from typing import TYPE_CHECKING
from typing_extensions import Literal
import numpy as np
from alibi_detect.base import outlier_prediction_dict
from alibi_detect.exceptions import _catch_error as catch_error
from alibi_detect.od.base import Transfor... | 9,117 | 40.634703 | 119 | py |
alibi-detect | alibi-detect-master/alibi_detect/od/llr.py | from functools import partial
import logging
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
from tensorflow_probability.python.distributions.distribution import Distribution
from typing import Callable, Dict, Tuple, Union
from alibi_detect.... | 14,091 | 36.280423 | 109 | py |
alibi-detect | alibi-detect-master/alibi_detect/od/aegmm.py | import logging
import numpy as np
import tensorflow as tf
from typing import Callable, Dict, Tuple
from alibi_detect.models.tensorflow.autoencoder import AEGMM, eucl_cosim_features
from alibi_detect.models.tensorflow.gmm import gmm_energy, gmm_params
from alibi_detect.models.tensorflow.losses import loss_aegmm
from ali... | 7,829 | 35.933962 | 107 | py |
alibi-detect | alibi-detect-master/alibi_detect/od/ae.py | import logging
import numpy as np
import tensorflow as tf
from typing import Dict, Tuple
from alibi_detect.models.tensorflow.autoencoder import AE
from alibi_detect.models.tensorflow.trainer import trainer
from alibi_detect.base import BaseDetector, FitMixin, ThresholdMixin, outlier_prediction_dict
from alibi_detect.ut... | 9,396 | 35.003831 | 109 | py |
alibi-detect | alibi-detect-master/alibi_detect/od/_gmm.py | from typing import Union, Optional, Dict, Any, TYPE_CHECKING
import numpy as np
from alibi_detect.utils._types import Literal
from alibi_detect.base import outlier_prediction_dict
from alibi_detect.base import BaseDetector, ThresholdMixin, FitMixin
from alibi_detect.od.pytorch import GMMTorch
from alibi_detect.od.skl... | 9,837 | 41.042735 | 170 | py |
alibi-detect | alibi-detect-master/alibi_detect/od/tests/test_aegmm.py | from itertools import product
import numpy as np
import pytest
import tensorflow as tf
from tensorflow.keras.layers import Dense, InputLayer
from alibi_detect.od import OutlierAEGMM
from alibi_detect.version import __version__
threshold = [None, 5.]
n_gmm = [1, 2]
w_energy = [.1, .5]
threshold_perc = [90.]
return_inst... | 3,007 | 31.695652 | 96 | py |
alibi-detect | alibi-detect-master/alibi_detect/od/tests/test_llr.py | from itertools import product
import numpy as np
import pytest
import tensorflow as tf
from tensorflow.keras.layers import Dense, Input, LSTM
from alibi_detect.od import LLR
from alibi_detect.version import __version__
input_dim = 5
hidden_dim = 20
shape = (1000, 6)
X_train = np.zeros(shape, dtype=np.int32)
X_train[:... | 3,587 | 34.524752 | 101 | py |
alibi-detect | alibi-detect-master/alibi_detect/od/tests/test_ae.py | from itertools import product
import numpy as np
import pytest
from sklearn.datasets import load_iris
import tensorflow as tf
from tensorflow.keras.layers import Dense, InputLayer
from alibi_detect.od import OutlierAE
from alibi_detect.version import __version__
threshold = [None, 5.]
threshold_perc = [90.]
return_ins... | 3,537 | 33.349515 | 114 | py |
alibi-detect | alibi-detect-master/alibi_detect/od/tests/test_vae.py | from itertools import product
import numpy as np
import pytest
from sklearn.datasets import load_iris
import tensorflow as tf
from tensorflow.keras.layers import Dense, InputLayer
from alibi_detect.od import OutlierVAE
from alibi_detect.models.tensorflow.losses import elbo
from alibi_detect.version import __version__
... | 3,862 | 33.801802 | 94 | py |
alibi-detect | alibi-detect-master/alibi_detect/od/tests/test_ensemble.py | import pytest
import torch
from alibi_detect.od.pytorch import ensemble
from alibi_detect.exceptions import NotFittedError
def test_pval_normalizer():
"""Test the PValNormalizer
- Test the PValNormalizer correctly normalizes data passed to it
- Test the PValNormalizer throws the correct errors if not fi... | 7,122 | 34.08867 | 113 | py |
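
Each row above pairs a Python source file (`repo`, `file`, `code`) with three derived statistics (`file_length`, `avg_line_length`, `max_line_length`). The minimal sketch below shows one plausible way to compute those columns from a raw `code` string; the exact definitions used when building the dataset are not documented here, so the formulas (e.g. whether newline characters count toward the average) are assumptions.

```python
# Sketch only: assumed definitions of the numeric columns, derived from a file's text.
# The dataset's exact computation (e.g. newline handling) may differ.
def row_stats(code: str) -> dict:
    lines = code.splitlines() or [""]            # treat an empty file as one empty line
    line_lengths = [len(line) for line in lines]
    return {
        "file_length": len(code),                            # total characters in the file
        "avg_line_length": sum(line_lengths) / len(lines),   # mean characters per line
        "max_line_length": max(line_lengths),                # longest single line
    }

if __name__ == "__main__":
    sample = "import numpy as np\n\ndef main():\n    print(np.zeros(3))\n"
    print(row_stats(sample))
```

With per-row statistics like these, filtering the corpus (for example, dropping files whose longest line exceeds a limit) reduces to a simple column comparison.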