| repo (string, 1-99 chars) | file (string, 13-215 chars) | code (string, 12-59.2M chars) | file_length (int64, 12-59.2M) | avg_line_length (float64, 3.82-1.48M) | max_line_length (int64, 12-2.51M) | extension_type (string, 1 class) |
|---|---|---|---|---|---|---|
covid-chestxray-dataset | covid-chestxray-dataset-master/tests/test_dataloader.py | import pytest
import torch
import torchvision
import torchxrayvision as xrv
from tqdm import tqdm
import sys
def test_dataloader_stats():
# print stats
for views in [["PA","AP"],["AP Supine"]]:
print(xrv.datasets.COVID19_Dataset(views=views,
imgpath="images... | 778 | 30.16 | 77 | py |
GAROM | GAROM-main/garom.py | import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import torch.nn.functional as F
import pandas as pd
# simple Generator Network
class Generator(nn.Module):
def __init__(self, input_dimension, parameters_dimension,
noise_dimension, activation=torch.nn.SiLU):
super... | 16,138 | 36.272517 | 132 | py |
GAROM | GAROM-main/experiments/POD/pod_graetz.py | import pandas as pd
import numpy as np
from ezyrb import POD, RBF, Database, GPR, ANN, AE
from ezyrb import ReducedOrderModel as ROM
import matplotlib.pyplot as plt
from smithers.dataset import GraetzDataset
import torch.nn as nn
from utils import get_args
args = get_args()
data = GraetzDataset()
snap_training = 160
... | 3,300 | 27.95614 | 70 | py |
GAROM | GAROM-main/experiments/POD/pod_gaussian.py | import pandas as pd
import numpy as np
from ezyrb import POD, RBF, Database, GPR, ANN, AE
from ezyrb import ReducedOrderModel as ROM
import matplotlib.pyplot as plt
import torch.nn as nn
from utils import get_args
args = get_args()
class ParametricGaussian(object):
def __init__(self, nx=30, ny=30, domain=[-1, 1... | 4,356 | 27.477124 | 74 | py |
GAROM | GAROM-main/experiments/POD/pod_lid.py | import pandas as pd
import numpy as np
from ezyrb import POD, RBF, Database, GPR, ANN, AE
from ezyrb import ReducedOrderModel as ROM
import matplotlib.pyplot as plt
import torch.nn as nn
from utils import get_args
from smithers.dataset import LidCavity
args = get_args()
data = LidCavity()
snap_training = 240
key = 'm... | 3,301 | 27.465517 | 70 | py |
GAROM | GAROM-main/experiments/GAROM/plot_results.py | import matplotlib.pyplot as plt
from matplotlib import rc
rc('font', **{'family': 'serif', 'serif': ['Computer Modern']})
rc('text', usetex=True)
def plot_POD_vs_GAROM(data_train, data_test, dataset, hidden_dim, garom, path_save, key=None):
from ezyrb import POD, RBF, Database
from ezyrb import ReducedOrde... | 10,714 | 34.131148 | 164 | py |
GAROM | GAROM-main/experiments/GAROM/lid.py | from garom import GAROM
import torch
from plot_results import plot_POD_vs_GAROM, plot_densities_generator, save_data
from utils import preprocessing_lidcavity, assess_model_quality, get_args
from smithers.dataset import LidCavity
import time
# get args parser
args = get_args()
path = args.path
if path is not None:
... | 2,661 | 29.597701 | 122 | py |
GAROM | GAROM-main/experiments/GAROM/utils.py | from matplotlib import rc
rc('font', **{'family': 'serif', 'serif': ['Computer Modern']})
rc('text', usetex=True)
class ParametricGaussian(object):
def __init__(self, nx=30, ny=30, domain=[-1, 1], numpy=False) -> None:
import torch
import matplotlib
import matplotlib.tri as tri
... | 15,951 | 33.23176 | 111 | py |
GAROM | GAROM-main/experiments/GAROM/gaussian.py | from garom import GAROM
import torch
from plot_results import plot_POD_vs_GAROM, plot_densities_generator, save_data
from utils import preprocessing_gaussian, assess_model_quality, get_args
import time
from utils import ParametricGaussian
# get args parser
args = get_args()
path = args.path
if path is not None:
... | 2,687 | 29.896552 | 122 | py |
GAROM | GAROM-main/experiments/GAROM/graetz.py | from smithers.dataset import GraetzDataset
from garom import GAROM
import torch
from plot_results import plot_POD_vs_GAROM, plot_densities_generator, save_data
from utils import preprocessing_graetz, assess_model_quality, get_args
import time
# get args parser
args = get_args()
path = args.path
if path is not None:
... | 2,687 | 29.896552 | 122 | py |
GAROM | GAROM-main/experiments/GAROM/garom.py | import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import torch.nn.functional as F
import pandas as pd
# simple Generator Network
class Generator(nn.Module):
def __init__(self, input_dimension, parameters_dimension,
noise_dimension, activation=torch.nn.SiLU):
super... | 16,138 | 36.272517 | 132 | py |
libact | libact-master/docs/conf.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# libact documentation build configuration file, created by
# sphinx-quickstart on Sun Nov 1 23:21:58 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# aut... | 11,069 | 31.654867 | 87 | py |
LAION-SAFETY | LAION-SAFETY-main/laionsafety.py | image_size = 260  # resolution of the image classifier
batchsize = 1024  # batch size for inference. Lower if you get OOM errors
datadir = "./laion400m-dat-release/" # dir where the tar files are located
SHARDS = "{00000..00002}.tar" # format of the tar files
targetdir1 = "./drawings/"
targetdir2 = "./hentai/"
targetdir3 = ... | 5,261 | 27.912088 | 155 | py |
VELOCIraptor-STF | VELOCIraptor-STF-master/doc/conf.py | # -*- coding: utf-8 -*-
#
# VELOCIraptor documentation build configuration file, created by
# sphinx-quickstart on Mon Jul 31 10:13:40 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
... | 5,697 | 29.967391 | 79 | py |
erc | erc-main/train-erc-text-hp.py | """Hyperparameter tuning script"""
import argparse
import json
import logging
import os
import torch
import yaml
from transformers import (AutoModelForSequenceClassification, AutoTokenizer,
Trainer, TrainingArguments)
from utils import ErcTextDataset, get_num_classes
logging.basicConfig(
... | 4,199 | 26.45098 | 92 | py |
erc | erc-main/app.py | """Emoberta app"""
import argparse
import logging
import os
import jsonpickle
import torch
from flask import Flask, request
from transformers import AutoModelForSequenceClassification, AutoTokenizer
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s.%(msecs)03d %(levelname)s %(module)s - %(funcName)... | 3,226 | 24.015504 | 90 | py |
erc | erc-main/train-erc-text-full.py | """Full training script"""
import argparse
import json
import logging
import os
import torch
import yaml
from transformers import (AutoModelForSequenceClassification, AutoTokenizer,
Trainer, TrainingArguments)
from utils import ErcTextDataset, compute_metrics, get_num_classes
logging.basicC... | 4,828 | 27.239766 | 90 | py |
erc | erc-main/utils/utils.py | """utility and helper functions / classes."""
import json
import logging
import os
import random
from typing import Tuple
import numpy as np
import torch
from sklearn.metrics import f1_score
from tqdm import tqdm
from transformers import AutoTokenizer
logging.basicConfig(
level=logging.INFO,
format="%(asctime... | 14,698 | 33.183721 | 90 | py |
sps | sps-master/setup.py | from setuptools import setup
setup(name='sps',
version='0.6.0',
description='Stochastic Polyak Step-size',
url='git@github.com:IssamLaradji/sps.git',
maintainer='Issam Laradji',
maintainer_email='issam.laradji@gmail.com',
license='MIT',
packages=['sps'],
zip_safe=False,
... | 596 | 24.956522 | 49 | py |
sps | sps-master/trainval.py | import torch
import torchvision
import tqdm
import pandas as pd
import pprint
import math
import itertools
import os, sys
import pylab as plt
import exp_configs
import time
import numpy as np
import torch.nn as nn
from src import models
from src import datasets
from src import optimizers
from src import utils as ut
fro... | 5,174 | 32.603896 | 100 | py |
sps | sps-master/sps/sps.py | import numpy as np
import torch
import time
import copy
class Sps(torch.optim.Optimizer):
def __init__(self,
params,
n_batches_per_epoch=500,
init_step_size=1,
c=0.5,
gamma=2.0,
eta_max=None,
ada... | 4,887 | 31.157895 | 99 | py |
sps | sps-master/src/base_classifiers.py | import torch
from torch import nn
from torch.nn import functional as F
import math
# from .base import mxresnet
import torchvision.models as models
def get_classifier(clf_name, train_set):
if clf_name in ["linear", "logistic"]:
batch = train_set[0]
model = Mlp_model(input_size=batch['images'].... | 13,350 | 31.484185 | 171 | py |
sps | sps-master/src/optimizers.py | import numpy as np
import torch
import time
import copy
from sps import Sps
def get_optimizer(opt_dict, params, train_loader, exp_dict):
"""
opt: name or dict
params: model parameters
n_batches_per_epoch: b/n
"""
opt_name = opt_dict['name']
# our optimizers
n_train = len(train_loade... | 1,548 | 30.612245 | 91 | py |
sps | sps-master/src/utils.py | import hashlib
import pickle
import json
import os
import itertools
import torch
import numpy as np
import tqdm
def opt_step(name, opt, model, batch, loss_function, use_backpack, epoch):
device = next(model.parameters()).device
images, labels = batch["images"].to(device=device), batch["labels"].to(device=dev... | 3,905 | 29.046154 | 113 | py |
sps | sps-master/src/datasets.py | import torchvision
from sklearn.model_selection import train_test_split
from torchvision import transforms
import torch
from sklearn import metrics
from src import utils as ut
from torch.utils.data import Dataset
import tqdm
def get_dataset(dataset_name, split, datadir, exp_dict):
train_flag = True if split == 't... | 18,117 | 34.806324 | 137 | py |
sps | sps-master/src/models.py | # -*- coding: utf-8 -*-
import os, pprint, tqdm
import numpy as np
import pandas as pd
from haven import haven_utils as hu
from haven import haven_img as hi
import torch
import torch.nn as nn
import torch.nn.functional as F
import sys
from . import base_classifiers
from . import optimizers
def get_model(train_loade... | 7,837 | 29.498054 | 132 | py |
sps | sps-master/src/metrics.py | import torch
import tqdm
from torch.utils.data import DataLoader
def get_metric_function(metric_name):
if metric_name == "logistic_accuracy":
return logistic_accuracy
if metric_name == "softmax_accuracy":
return softmax_accuracy
elif metric_name == "softmax_loss":
return softmax... | 3,408 | 24.440299 | 85 | py |
sps | sps-master/tests/test_basic.py | import sys, os
path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.insert(0, path)
import unittest
import numpy as np
import os
import torch
import shutil
from haven import haven_utils as hu
from haven import haven_results as hr
from haven import haven_chk as hc
from haven import haven_job... | 1,226 | 22.150943 | 67 | py |
GoTube | GoTube-main/main.py | import numpy as np
import jax.numpy as jnp
import benchmarks as bm
import stochastic_reachtube as reach
import go_tube
import configparser
import time
from performance_log import log_args
from performance_log import close_log
from performance_log import create_plot_file
from performance_log import write_plot_file
from... | 4,930 | 32.773973 | 104 | py |
GoTube | GoTube-main/benchmarks.py | # different classes with benchmarks
import jax.numpy as np
from jax.numpy import tanh
from jax.numpy import sin
from jax.numpy import cos
from jax.numpy import exp
def get_model(benchmark, radius=None):
if benchmark == "bruss":
return Brusselator(radius) # Benchmark to run
elif benchmark == "vdp":
... | 52,299 | 33.544254 | 89 | py |
GoTube | GoTube-main/polar_coordinates.py | # transformation between polar and cartesian coordinates
import numpy as np
import jax.numpy as jnp
from jax import jit
import dynamics
# initialize random polar coordinates with dimension dim
_rng = np.random.RandomState(12937)
def uniform(start, end, dim, fixed_seed):
if fixed_seed:
global _rng
... | 976 | 24.051282 | 79 | py |
GoTube | GoTube-main/dynamics.py | # computes the jacobian and the metric for a given model
import jax.numpy as jnp
import numpy as np
from jax import jacfwd, jacrev, jit
from scipy.linalg import eigh
from numpy.linalg import inv
import benchmarks as bm
class FunctionDynamics:
def __init__(self, model):
self.model = model
x = jn... | 1,884 | 26.720588 | 88 | py |
GoTube | GoTube-main/go_tube.py | # Algorithms of GoTube paper for safety region, probability and stoch. optimization
import jax.numpy as jnp
from jax import vmap, pmap
import polar_coordinates as pol
from jax.numpy.linalg import svd
import jax.scipy.special as sc
import time
from performance_log import log_stat
from timer import Timer
from scipy.stat... | 8,987 | 36.764706 | 150 | py |
GoTube | GoTube-main/stochastic_reachtube.py | # optimization problem
import numpy as np
import jax.numpy as jnp
from jax.experimental.ode import odeint
from jax import vmap, jit, pmap, device_put, devices
from functools import partial
from scipy.special import gamma
# own files
import benchmarks as bm
import polar_coordinates as pol
import dynamics
def create... | 9,050 | 34.355469 | 115 | py |
DAVE | DAVE-master/model.py | #
# DAVE: A Deep Audio-Visual Embedding for Dynamic Saliency Prediction
# https://arxiv.org/abs/1905.10693
# https://hrtavakoli.github.io/DAVE/
#
# Copyright by Hamed Rezazadegan Tavakoli
#
import torch
import torch.nn as nn
import torch.nn.functional as F
from utils.resnet3D import resnet18
class ScaleUp(nn.Modul... | 2,372 | 28.6625 | 138 | py |
DAVE | DAVE-master/predict.py | #
# DAVE: A Deep Audio-Visual Embedding for Dynamic Saliency Prediction
# https://arxiv.org/abs/1905.10693
# https://hrtavakoli.github.io/DAVE/
#
# Copyright by Hamed Rezazadegan Tavakoli
#
import re
import os
import torch
import numpy as np
from PIL import Image
from utils.process_video_audio import LoadVideoAudio
... | 3,312 | 29.675926 | 101 | py |
DAVE | DAVE-master/utils/resnet3D.py | #
# 3D-ResNet implementation
# provided by Kensho Hara
# introduced in
# Kensho Hara, Hirokatsu Kataoka, and Yutaka Satoh,
# "Can Spatiotemporal 3D CNNs Retrace the History of 2D CNNs and ImageNet?",
# Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 6546-6555, 2018.
#
import torch... | 7,599 | 30.147541 | 134 | py |
DAVE | DAVE-master/utils/process_video_audio.py |
# generic must-have imports
import os
import torch
import numpy as np
import utils.audio_params as audio_params
import librosa as sf
from utils.audio_features import waveform_to_feature
from PIL import Image
import torchvision.transforms.functional as F
__all__ = ['LoadVideoAudio']
#defined params @TODO move them to... | 5,524 | 29.694444 | 120 | py |
logbert | logbert-master/TBird/logbert.py | import sys
sys.path.append("../")
sys.path.append("../../")
import os
dirname = os.path.dirname(__file__)
filename = os.path.join(dirname, '../deeplog')
import argparse
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from bert_pytorch.dataset import WordVocab
from bert_pytorch import Predictor, Train... | 3,345 | 26.652893 | 87 | py |
logbert | logbert-master/TBird/deeplog.py | # -*- coding: utf-8 -*-
import platform
import argparse
import sys
sys.path.append('../')
from logdeep.models.lstm import *
from logdeep.tools.predict import Predicter
from logdeep.tools.train import Trainer
from logdeep.tools.utils import *
from logdeep.dataset.vocab import Vocab
import torch
output_dir = "../outp... | 3,308 | 25.055118 | 100 | py |
logbert | logbert-master/TBird/loganomaly.py | # -*- coding: utf-8 -*-
import platform
import argparse
import sys
sys.path.append('../')
from logdeep.models.lstm import *
from logdeep.tools.predict import Predicter
from logdeep.tools.train import Trainer
from logdeep.tools.utils import *
from logdeep.dataset.vocab import Vocab
import torch
output_dir = "../outp... | 3,326 | 25.19685 | 100 | py |
logbert | logbert-master/logdeep/tools/utils.py | import os
import random
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
def save_parameters(options, filename):
with open(filename, "w+") as f:
for key in options.keys():
f.write("{}... | 2,823 | 31.090909 | 76 | py |
logbert | logbert-master/logdeep/tools/predict.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import gc
import os
import sys
import time
from collections import Counter, defaultdict
sys.path.append('../../')
import pickle
import numpy as np
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from tqdm import tqdm
from logdeep.data... | 10,216 | 41.045267 | 161 | py |
logbert | logbert-master/logdeep/tools/train.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import gc
import os
import sys
import time
sys.path.append('../../')
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from tqdm import tqdm
import pickle
import torch
import torch.nn as nn
from torch.utils.data import DataLoad... | 11,963 | 38.22623 | 129 | py |
logbert | logbert-master/logdeep/dataset/log.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import torch
from torch.utils.data import Dataset, Sampler
class log_dataset(Dataset):
def __init__(self, logs, labels, seq=True, quan=False, sem=False, param=False):
self.seq = seq
self.quan = quan
self.... | 1,514 | 29.918367 | 83 | py |
logbert | logbert-master/logdeep/models/lstm.py | import torch
import torch.nn as nn
class deeplog(nn.Module):
def __init__(self, input_size, hidden_size, num_layers, vocab_size, embedding_dim=None):
super(deeplog, self).__init__()
self.hidden_size = hidden_size
self.num_layers = num_layers
self.lstm = nn.LSTM(input_size,
... | 14,766 | 39.792818 | 92 | py |
logbert | logbert-master/loglizer/preprocessing.py | """
The interface for data preprocessing.
Authors:
LogPAI Team
"""
import pandas as pd
import os
import numpy as np
import re
from collections import Counter
from scipy.special import expit
from itertools import compress
from torch.utils.data import DataLoader, Dataset
class Iterator(Dataset):
def __init__... | 5,336 | 34.111842 | 144 | py |
logbert | logbert-master/loglizer/models/DeepLog.py | import torch
import math
import torch.optim as optim
import pandas as pd
from torch import nn
from sklearn.metrics import accuracy_score, f1_score, recall_score, precision_score
from collections import defaultdict
class DeepLog(nn.Module):
def __init__(self, num_labels, hidden_size=100, num_directions=2, topk=9, d... | 4,475 | 45.14433 | 128 | py |
logbert | logbert-master/BGL/logbert.py | import sys
sys.path.append("../")
# sys.path.append("../../")
#
# import os
# dirname = os.path.dirname(__file__)
# filename = os.path.join(dirname, '../deeplog')
import argparse
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from bert_pytorch.dataset import WordVocab
from bert_pytorch import Predict... | 3,276 | 26.771186 | 85 | py |
logbert | logbert-master/BGL/deeplog.py | # -*- coding: utf-8 -*-
import platform
import argparse
import sys
sys.path.append('../')
from logdeep.models.lstm import *
from logdeep.tools.predict import Predicter
from logdeep.tools.train import Trainer
from logdeep.tools.utils import *
from logdeep.dataset.vocab import Vocab
import torch
output_dir = "../outpu... | 3,514 | 26.677165 | 105 | py |
logbert | logbert-master/BGL/loganomaly.py | # -*- coding: utf-8 -*-
import argparse
import sys
sys.path.append('../')
from logdeep.models.lstm import *
from logdeep.tools.predict import Predicter
from logdeep.tools.train import Trainer
from logdeep.tools.utils import *
from logdeep.dataset.vocab import Vocab
import torch
output_dir = "../output/bgl/"
# Confi... | 3,293 | 25.142857 | 100 | py |
logbert | logbert-master/bert_pytorch/__main__.py | import argparse
from torch.utils.data import DataLoader
from bert_pytorch.model import BERT
from bert_pytorch.trainer import BERTTrainer
from bert_pytorch.dataset import BERTDataset, WordVocab
def train():
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--train_dataset", required=True, type=s... | 4,168 | 53.855263 | 133 | py |
logbert | logbert-master/bert_pytorch/predict_log.py | import numpy as np
import scipy.stats as stats
import seaborn as sns
import matplotlib.pyplot as plt
import pickle
import time
import torch
from tqdm import tqdm
from torch.utils.data import DataLoader
from bert_pytorch.dataset import WordVocab
from bert_pytorch.dataset import LogDataset
from bert_pytorch.dataset.samp... | 12,264 | 41.147766 | 149 | py |
logbert | logbert-master/bert_pytorch/train_log.py | from torch.utils.data import DataLoader
from bert_pytorch.model import BERT
from bert_pytorch.trainer import BERTTrainer
from bert_pytorch.dataset import LogDataset, WordVocab
from bert_pytorch.dataset.sample import generate_train_valid
from bert_pytorch.dataset.utils import save_parameters
import matplotlib.pyplot as... | 8,689 | 42.888889 | 143 | py |
logbert | logbert-master/bert_pytorch/trainer/pretrain.py | import torch
import torch.nn as nn
from torch.optim import Adam
from torch.utils.data import DataLoader
from ..model import BERTLog, BERT
from .optim_schedule import ScheduledOptim
import time
import tqdm
import numpy as np
import pandas as pd
class BERTTrainer:
"""
    BERTTrainer makes the pretrained BERT model ... | 8,562 | 38.643519 | 145 | py |
logbert | logbert-master/bert_pytorch/dataset/utils.py | import random
import os
import numpy as np
import torch
def save_parameters(options, filename):
with open(filename, "w+") as f:
for key in options.keys():
f.write("{}: {}\n".format(key, options[key]))
# https://gist.github.com/KirillVladimirov/005ec7f762293d2321385580d3dbe335
def seed_everyth... | 538 | 27.368421 | 75 | py |
logbert | logbert-master/bert_pytorch/dataset/dataset.py | from torch.utils.data import Dataset
import tqdm
import torch
import random
import numpy as np
class BERTDataset(Dataset):
def __init__(self, corpus_path, vocab, seq_len, corpus_lines=None, encoding="utf-8", on_memory=True, predict_mode=False):
self.vocab = vocab
self.seq_len = seq_len
sel... | 4,612 | 33.94697 | 125 | py |
logbert | logbert-master/bert_pytorch/dataset/log_dataset.py | from torch.utils.data import Dataset
import torch
import random
import numpy as np
from collections import defaultdict
class LogDataset(Dataset):
def __init__(self, log_corpus, time_corpus, vocab, seq_len, corpus_lines=None, encoding="utf-8", on_memory=True, predict_mode=False, mask_ratio=0.15):
"""
... | 4,713 | 33.918519 | 154 | py |
logbert | logbert-master/bert_pytorch/dataset/vocab.py | import pickle
import tqdm
from collections import Counter
import sys
sys.path.append("../")
class TorchVocab(object):
"""Defines a vocabulary object that will be used to numericalize a field.
Attributes:
freqs: A collections.Counter object holding the frequencies of tokens
in the data used ... | 6,101 | 34.894118 | 93 | py |
logbert | logbert-master/bert_pytorch/model/bert.py | import torch.nn as nn
import torch
from .transformer import TransformerBlock
from .embedding import BERTEmbedding
class BERT(nn.Module):
"""
BERT model : Bidirectional Encoder Representations from Transformers.
"""
def __init__(self, vocab_size, max_len=512, hidden=768, n_layers=12, attn_heads=12, dr... | 1,831 | 35.64 | 135 | py |
logbert | logbert-master/bert_pytorch/model/log_model.py | import torch.nn as nn
import torch
from .bert import BERT
class BERTLog(nn.Module):
"""
BERT Log Model
"""
def __init__(self, bert: BERT, vocab_size):
"""
:param bert: BERT model which should be trained
:param vocab_size: total vocab size for masked_lm
"""
supe... | 2,278 | 27.848101 | 110 | py |
logbert | logbert-master/bert_pytorch/model/transformer.py | import torch.nn as nn
from .attention import MultiHeadedAttention
from .utils import SublayerConnection, PositionwiseFeedForward
class TransformerBlock(nn.Module):
"""
Bidirectional Encoder = Transformer (self-attention)
Transformer = MultiHead_Attention + Feed_Forward with sublayer connection
"""
... | 1,276 | 38.90625 | 110 | py |
logbert | logbert-master/bert_pytorch/model/language_model.py | import torch.nn as nn
from .bert import BERT
class BERTLM(nn.Module):
"""
BERT Language Model
Next Sentence Prediction Model + Masked Language Model
"""
def __init__(self, bert: BERT, vocab_size):
"""
:param bert: BERT model which should be trained
:param vocab_size: tota... | 1,626 | 25.241935 | 72 | py |
logbert | logbert-master/bert_pytorch/model/embedding/bert.py | import torch.nn as nn
import torch
from .token import TokenEmbedding
from .position import PositionalEmbedding
from .segment import SegmentEmbedding
from .time_embed import TimeEmbedding
class BERTEmbedding(nn.Module):
"""
    BERT Embedding, which consists of the following features:
1. TokenEmbedding : normal... | 1,703 | 38.627907 | 100 | py |
logbert | logbert-master/bert_pytorch/model/embedding/position.py | import torch.nn as nn
import torch
import math
class PositionalEmbedding(nn.Module):
def __init__(self, d_model, max_len=512):
super().__init__()
# Compute the positional encodings once in log space.
pe = torch.zeros(max_len, d_model).float()
        pe.requires_grad = False
posi... | 710 | 26.346154 | 95 | py |
logbert | logbert-master/bert_pytorch/model/embedding/time_embed.py | import torch.nn as nn
class TimeEmbedding(nn.Module):
def __init__(self, embed_size=512):
super().__init__()
self.time_embed = nn.Linear(1, embed_size)
def forward(self, time_interval):
return self.time_embed(time_interval)
| 259 | 22.636364 | 50 | py |
logbert | logbert-master/bert_pytorch/model/embedding/segment.py | import torch.nn as nn
class SegmentEmbedding(nn.Embedding):
def __init__(self, embed_size=512):
super().__init__(3, embed_size, padding_idx=0)
| 157 | 21.571429 | 54 | py |
logbert | logbert-master/bert_pytorch/model/embedding/token.py | import torch.nn as nn
class TokenEmbedding(nn.Embedding):
def __init__(self, vocab_size, embed_size=512):
super().__init__(vocab_size, embed_size, padding_idx=0)
| 176 | 24.285714 | 63 | py |
logbert | logbert-master/bert_pytorch/model/attention/multi_head.py | import torch.nn as nn
from .single import Attention
class MultiHeadedAttention(nn.Module):
"""
Take in model size and number of heads.
"""
def __init__(self, h, d_model, dropout=0.1):
super().__init__()
assert d_model % h == 0
# We assume d_v always equals d_k
self.d_... | 1,268 | 32.394737 | 91 | py |
logbert | logbert-master/bert_pytorch/model/attention/single.py | import torch.nn as nn
import torch.nn.functional as F
import torch
import math
class Attention(nn.Module):
"""
    Compute 'Scaled Dot Product Attention'
"""
def forward(self, query, key, value, mask=None, dropout=None):
scores = torch.matmul(query, key.transpose(-2, -1)) \
/ mat... | 596 | 21.961538 | 66 | py |
logbert | logbert-master/bert_pytorch/model/utils/gelu.py | import torch.nn as nn
import torch
import math
class GELU(nn.Module):
"""
    Paper Section 3.4, last paragraph, notes that BERT uses GELU instead of ReLU
"""
def forward(self, x):
return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
| 301 | 22.230769 | 100 | py |
logbert | logbert-master/bert_pytorch/model/utils/feed_forward.py | import torch.nn as nn
from .gelu import GELU
class PositionwiseFeedForward(nn.Module):
"Implements FFN equation."
def __init__(self, d_model, d_ff, dropout=0.1):
super(PositionwiseFeedForward, self).__init__()
self.w_1 = nn.Linear(d_model, d_ff)
self.w_2 = nn.Linear(d_ff, d_model)
... | 488 | 27.764706 | 67 | py |
logbert | logbert-master/bert_pytorch/model/utils/sublayer.py | import torch.nn as nn
from .layer_norm import LayerNorm
class SublayerConnection(nn.Module):
"""
A residual connection followed by a layer norm.
Note for code simplicity the norm is first as opposed to last.
"""
def __init__(self, size, dropout):
super(SublayerConnection, self).__init__()... | 565 | 28.789474 | 71 | py |
logbert | logbert-master/bert_pytorch/model/utils/layer_norm.py | import torch.nn as nn
import torch
class LayerNorm(nn.Module):
"Construct a layernorm module (See citation for details)."
def __init__(self, features, eps=1e-6):
super(LayerNorm, self).__init__()
self.a_2 = nn.Parameter(torch.ones(features))
self.b_2 = nn.Parameter(torch.zeros(feature... | 519 | 27.888889 | 66 | py |
logbert | logbert-master/HDFS/logbert.py | import sys
sys.path.append("../")
sys.path.append("../../")
import os
dirname = os.path.dirname(__file__)
filename = os.path.join(dirname, '../deeplog')
import argparse
import torch
from bert_pytorch.dataset import WordVocab
from bert_pytorch import Predictor, Trainer
from bert_pytorch.dataset.utils import seed_eve... | 3,433 | 27.857143 | 87 | py |
logbert | logbert-master/HDFS/deeplog.py | # -*- coding: utf-8 -*-
import platform
import argparse
import sys
sys.path.append('../')
from logdeep.models.lstm import *
from logdeep.tools.predict import Predicter
from logdeep.tools.train import Trainer
from logdeep.tools.utils import *
from logdeep.dataset.vocab import Vocab
import torch
data_dir = os.path.exp... | 3,540 | 26.664063 | 105 | py |
logbert | logbert-master/HDFS/loganomaly.py | # -*- coding: utf-8 -*-
import platform
import argparse
import sys
sys.path.append('../')
from logdeep.models.lstm import *
from logdeep.tools.predict import Predicter
from logdeep.tools.train import Trainer
from logdeep.tools.utils import *
from logdeep.dataset.vocab import Vocab
import torch
output_dir = "../outpu... | 3,582 | 26.775194 | 105 | py |
WebBrain | WebBrain-main/generator/chunk_dataset.py | from torch.utils.data import Dataset
import numpy as np
class TextDataset(Dataset):
def __init__(self, data_list, tokenizer, max_encoding_length, max_decoding_length, max_ref_num):
super(TextDataset, self).__init__()
self._data = data_list
self._tokenizer = tokenizer
self._max_encod... | 2,525 | 44.107143 | 134 | py |
WebBrain | WebBrain-main/generator/run_model_test.py | import argparse
import torch
import random
import numpy as np
import os
from tqdm import tqdm
from torch.utils.data import DataLoader
from text_dataset import TextDataset
from bart_generation import FusionModel
from fid_model import FiDBART
from transformers import AdamW, get_linear_schedule_with_warmup, BartConfig, Ba... | 4,083 | 39.84 | 174 | py |
WebBrain | WebBrain-main/generator/run_model_train.py | import argparse
import torch
import torch.nn as nn
import torch.distributed as dist
import torch.multiprocessing as multiprocessing
import random
import numpy as np
import os
import time
import sys
import math
import moxing as mox
from tqdm import tqdm
from torch.utils.data import DataLoader
from chunk_dataset import T... | 12,792 | 45.184116 | 223 | py |
WebBrain | WebBrain-main/generator/bart_generation.py | import torch
import torch.nn as nn
import torch.nn.init as init
class FusionModel(nn.Module):
def __init__(self, bart, config):
super(FusionModel, self).__init__()
self.fidbart = bart
self.config = config
def forward(self, batch_data):
"""
Args: context: [batch, 2,... | 728 | 29.375 | 177 | py |
WebBrain | WebBrain-main/generator/fid_model.py | import torch
import torch.nn as nn
from transformers import BartForConditionalGeneration
class FiDBART(BartForConditionalGeneration):
def __init__(self, config):
super().__init__(config)
self.wrap_encoder()
def wrap_encoder(self):
self.model.encoder = EncoderWrapper(self.model.enco... | 2,096 | 38.566038 | 109 | py |
WebBrain | WebBrain-main/retriever/eval.py | from torchmetrics import RetrievalRecall, RetrievalMRR, RetrievalPrecision, RetrievalMAP
from torch import tensor
from torch.utils.data import DataLoader
from tqdm import tqdm
from utils import Text_Dataset, get_argument_parser, \
SparseRetrieval
import json
import faiss
import pytorch_lightning as ... | 3,410 | 29.72973 | 88 | py |
WebBrain | WebBrain-main/retriever/train.py | from pytorch_lightning.callbacks import ModelCheckpoint, LearningRateMonitor, TQDMProgressBar
import pytorch_lightning as pl
from pytorch_lightning.strategies import DDPStrategy
import os
from utils import get_argument_parser, Hard_Negative_Dataset, set_seed, get_dist_info
from models import ReGenBiEncoder
import warn... | 1,587 | 30.137255 | 93 | py |
WebBrain | WebBrain-main/retriever/index.py | from utils import get_argument_parser, Text_Dataset, IndexDictOfArray, show_memory_info, get_dist_info
from models import ReGenLightning
import pytorch_lightning as pl
from torch.utils.data import DataLoader
import json
from tqdm import tqdm
from torch import distributed as dist
from typing import Union
import torch
im... | 3,112 | 32.836957 | 102 | py |
WebBrain | WebBrain-main/retriever/models/regen_retriever.py |
import os
import torch
import torch.nn as nn
from transformers import AutoTokenizer, AutoModelForMaskedLM, AutoModel
import pytorch_lightning as pl
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
from transformers import get_linear_schedule_with_warmup
from utils imp... | 5,429 | 33.585987 | 112 | py |
WebBrain | WebBrain-main/retriever/models/loss_func.py | import torch
import torch.nn as nn
def ranking_loss(query_embs, pos_doc_embs, neg_doc_embs):
batch_size = len(query_embs)
pos_scores = query_embs.mm(pos_doc_embs.T) # B * B
score_mat = pos_scores
if neg_doc_embs is not None:
neg_scores = torch.sum(query_embs.unsqueeze(1) * neg_doc_embs, dim = ... | 638 | 41.6 | 137 | py |
WebBrain | WebBrain-main/retriever/utils/args.py | import argparse
import os
from tqdm import tqdm
import json
from transformers import AutoTokenizer
import torch
def get_argument_parser():
parser = argparse.ArgumentParser()
parser.add_argument("-e", "--epoch",
help="epochs to train the model", type=int, default=40)
parser.add_argument("-bs", "--ba... | 3,914 | 49.192308 | 112 | py |
WebBrain | WebBrain-main/retriever/utils/dataset.py | from torch.utils.data import Dataset
import os
import json
class Hard_Negative_Dataset(Dataset):
def __init__(self, args, data_path) -> None:
"""
iterably load the triples, tokenize and return
"""
self.args = args
super().__init__()
self.query_length = args.query_l... | 2,514 | 29.301205 | 75 | py |
WebBrain | WebBrain-main/retriever/utils/sys_utils.py | import os
import psutil
import numpy as np
import random
import torch
from torch import distributed as dist
from typing import Tuple
def set_seed(seed):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cud... | 874 | 24.735294 | 53 | py |
WebBrain | WebBrain-main/retriever/utils/retrieval_utils.py | import os
import h5py
import json
import numba
import array
import pickle
import numpy as np
from tqdm import tqdm
from collections import defaultdict
import torch
class IndexDictOfArray:
def __init__(self, index_path=None, force_new=False, filename="array_index.h5py", dim_voc=None):
# index_path = None # ... | 8,044 | 46.60355 | 112 | py |
ESFR | ESFR-main/reconstruction.py | import tensorflow as tf
from tensorflow.keras.layers import Dense
from utils import get_lid_score
class ReconstructionModule(tf.keras.Model):
def __init__(self, input_dim, hidden=(640, 640, 640, 640)):
super(ReconstructionModule, self).__init__()
self.feat_dim = list(hidden)[-1]
self.layer... | 6,366 | 36.017442 | 115 | py |
ESFR | ESFR-main/embeddings/embeddings.py | import os
import pickle
import torch
import collections
import numpy as np
from tqdm import tqdm
import src.models as models
import src.datasets as datasets
DATA_PATH_TO_CUB = ''
DATA_PATH_TO_MINI = ''
DATA_PATH_TO_TIERED = ''
data_dict = {
'cub': [DATA_PATH_TO_CUB, './split/cub', 100],
'mini': [DATA_PATH_... | 3,524 | 33.558824 | 116 | py |
ecir2019-qac | ecir2019-qac-master/qac/baseline/baseline_cnn_cv.py | import argparse
import logging
import logging.config
import os
from collections import namedtuple
from datetime import datetime
from os.path import dirname, join
import numpy as np
import pandas as pd
from sklearn.model_selection import ParameterGrid
import keras
import tensorflow
from keras.callbacks import EarlySto... | 11,315 | 35.503226 | 100 | py |
ecir2019-qac | ecir2019-qac-master/qac/experiments/cnn_util.py | import csv
import inspect
import logging
import os
from contextlib import redirect_stdout
from os.path import join
import matplotlib as mpl
mpl.use('agg')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import keras
from qac.evaluation import evaluation
from qac.experiments import preprocessing... | 4,808 | 34.360294 | 96 | py |
specter | specter-master/specter/model.py | from typing import Dict, Optional, Union
import numpy
from allennlp.common.checks import ConfigurationError
from overrides import overrides
import torch
import torch.nn.functional as F
from torch.nn import Dropout
from torch.nn.modules.distance import CosineSimilarity
from allennlp.data import Vocabulary
from allennl... | 15,093 | 44.056716 | 119 | py |
specter | specter-master/scripts/pytorch_lightning_training_script/train.py | # basic python packages
import json
import pickle
from typing import Dict
import argparse
from argparse import Namespace
import glob
import random
import numpy as np
import itertools
import logging
logger = logging.getLogger(__name__)
# pytorch packages
import torch
import torch.nn as nn
import torch.nn.functional as ... | 26,484 | 43.215359 | 159 | py |
certifiable-distributional-robustness | certifiable-distributional-robustness-master/utils_tf.py | # Based on code from https://github.com/tensorflow/cleverhans
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import keras
import math
import numpy as np
import os
import six
import tensorflow as tf
import time
import... | 6,975 | 33.196078 | 88 | py |
certifiable-distributional-robustness | certifiable-distributional-robustness-master/train_mnist_models.py | # Based on code from https://github.com/tensorflow/cleverhans
#
# This is the code for the paper
#
# Certifying Some Distributional Robustness with Principled Adversarial Training
# Link: https://openreview.net/forum?id=Hk6kPgZA-
#
# Authors: Aman Sinha, Hongseok Namkoong, John Duchi
from __future__ import absolute_im... | 4,405 | 36.65812 | 92 | py |
certifiable-distributional-robustness | certifiable-distributional-robustness-master/utils.py | # Based on code from https://github.com/tensorflow/cleverhans
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from distutils.version import LooseVersion
import keras
from keras.utils import np_utils
from keras.models ... | 7,516 | 32.261062 | 79 | py |
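
The rows above preview a dataset of Python source files, one file per row, paired with simple size statistics (total character count, average line length, maximum line length). As a minimal sketch of how such a table could be consumed once exported, the snippet below loads it with pandas and filters on those statistics columns; the file name `code_files.parquet` is a hypothetical placeholder, since the preview does not name an export file.

```python
import pandas as pd

# Hypothetical export of the table above; the preview does not name a file,
# so "code_files.parquet" is a placeholder assumption.
df = pd.read_parquet("code_files.parquet")

# Keep Python files (the only extension_type in this preview) that are small
# enough to read by hand and contain no extremely long lines.
subset = df[
    (df["extension_type"] == "py")
    & (df["file_length"] < 10_000)
    & (df["max_line_length"] < 120)
]

# Count how many files each repository contributes to the filtered subset.
print(subset.groupby("repo")["file"].count().sort_values(ascending=False))
```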