Dataset schema (column, dtype, observed range):

| column | dtype | observed range |
|---|---|---|
| repo | string | lengths 1–99 |
| file | string | lengths 13–215 |
| code | string | lengths 12–59.2M |
| file_length | int64 | 12–59.2M |
| avg_line_length | float64 | 3.82–1.48M |
| max_line_length | int64 | 12–2.51M |
| extension_type | string | 1 class (`py`) |

Each record below is one flattened row of this table: it opens with `repo | file |` followed by the truncated, multi-line contents of the `code` field, and closes with `... | file_length | avg_line_length | max_line_length | extension_type |` on the last code line.
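As a quick orientation to this schema, here is a minimal sketch of how rows with these columns could be loaded and filtered with the `datasets` library. The file name `code_files.jsonl` and the numeric thresholds are hypothetical assumptions for illustration, not taken from this page.

```python
# Hypothetical sketch: load rows matching the schema above from an assumed
# local JSONL export of the dataset, then filter on the numeric columns.
from datasets import load_dataset

ds = load_dataset("json", data_files="code_files.jsonl", split="train")

# Keep Python files that are short and free of very long lines, using the
# file_length / max_line_length columns documented in the schema table.
compact_py = ds.filter(
    lambda row: row["extension_type"] == "py"
    and row["file_length"] < 20_000
    and row["max_line_length"] < 200
)

print(len(compact_py), compact_py[0]["repo"], compact_py[0]["file"])
```

A projection like this is typically the first step before feeding such a corpus into tokenization or deduplication, since the length statistics are already precomputed per row.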
LatentOps | LatentOps-main/code/examples/big_ae/modules/arae.py | import math
import torch
import torch.nn as nn
from .utils import log_sum_exp
import pdb
import sys
sys.path.append('../../')
from pytorch_transformers.modeling_bert import BertEmbeddings
import torch.nn.functional as F
class ARAE(nn.Module):
def __init__(self, encoder, decoder, tokenizer_encoder, tokenizer_decod... | 13,874 | 49.454545 | 158 | py |
LatentOps | LatentOps-main/code/examples/big_ae/modules/eval_sampler.py | # ---------------------------------------------------------------
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the NVIDIA Source Code License
# for LACE. To view a copy of this license, see the LICENSE file.
# ------------------------------------------------------------... | 75,170 | 48.914343 | 149 | py |
LatentOps | LatentOps-main/code/examples/big_ae/modules/spacefusion.py | from .vae import VAE
import numpy as np
import torch, copy, pdb
import torch.nn.functional as F
from torch import nn
import pdb
def set_trainable(module, value):
for param in module.parameters():
param.requires_grad = value
class SpaceFusion(VAE):
def __init__(self, encoder, decoder, tokenizer_enc... | 5,486 | 37.370629 | 136 | py |
LatentOps | LatentOps-main/code/examples/big_ae/modules/daae.py | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from .utils import log_sum_exp
from .vae import DenseEmbedder
import pdb
import numpy as np
import logging
logger = logging.getLogger(__name__)
class DAAE(nn.Module):
"""DAAE with normal prior"""
def __init__(self, encoder, decoder... | 25,154 | 35.038682 | 141 | py |
LatentOps | LatentOps-main/code/examples/big_ae/modules/decoders/decoder.py | import torch
import torch.nn as nn
class DecoderBase(nn.Module):
"""docstring for Decoder"""
def __init__(self):
super(DecoderBase, self).__init__()
def freeze(self):
for param in self.parameters():
param.requires_grad = False
def decode(self, x, z):
"""
... | 1,906 | 23.139241 | 93 | py |
LatentOps | LatentOps-main/code/examples/big_ae/modules/decoders/dec_gpt2.py | # import torch
import time
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence
import numpy as np
from .decoder import DecoderBase
class LSTMDecoder(DecoderBase):
"""LSTM decoder with constant-length data""... | 12,113 | 32.837989 | 121 | py |
LatentOps | LatentOps-main/code/examples/big_ae/modules/encoders/enc_lstm.py | from itertools import chain
import math
import torch
import torch.nn as nn
from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence
from .gaussian_encoder import GaussianEncoderBase
from ..utils import log_sum_exp
class GaussianLSTMEncoder(GaussianEncoderBase):
"""Gaussian LSTM Encoder with consta... | 4,127 | 31.503937 | 93 | py |
LatentOps | LatentOps-main/code/examples/big_ae/modules/encoders/gaussian_encoder.py | import math
import torch
import torch.nn as nn
from .encoder import EncoderBase
from ..utils import log_sum_exp
class GaussianEncoderBase(EncoderBase):
"""docstring for EncoderBase"""
def __init__(self):
super(GaussianEncoderBase, self).__init__()
def freeze(self):
for param in self.param... | 4,232 | 27.795918 | 92 | py |
LatentOps | LatentOps-main/code/examples/big_ae/modules/encoders/encoder.py | import math
import torch
import torch.nn as nn
from ..utils import log_sum_exp
class EncoderBase(nn.Module):
"""docstring for EncoderBase"""
def __init__(self):
super(EncoderBase, self).__init__()
def forward(self, x):
"""
Args:
x: (batch_size, *)
Returns: the ... | 1,637 | 27.241379 | 74 | py |
CODINE-copula-estimator | CODINE-copula-estimator-main/CODINE_Toy.py | from __future__ import print_function, division
from keras.layers import Input, Dense, GaussianNoise, LeakyReLU,BatchNormalization
from keras.models import Sequential, Model
from keras.optimizers import Adam
from keras import backend as K
import tensorflow as tf
import numpy as np
import scipy.io as sio
import argpars... | 12,723 | 41.272425 | 172 | py |
CODINE-copula-estimator | CODINE-copula-estimator-main/CODINE_Gaussian.py | from __future__ import print_function, division
from keras.layers import Input, Dense, GaussianNoise, LeakyReLU
from keras.models import Sequential, Model
from keras.optimizers import Adam
from keras import backend as K
import tensorflow as tf
K.clear_session()
import numpy as np
import scipy.io as sio
import argpars... | 10,555 | 36.835125 | 172 | py |
CEP | CEP-master/docs/conf.py | # -*- coding: utf-8 -*-
#
# Chipyard documentation build configuration file, created by
# sphinx-quickstart on Fri Mar 8 11:46:38 2019.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# ... | 7,232 | 30.447826 | 106 | py |
suncet | suncet-main/main.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import torch.multiprocessing as mp
import pprint
import yaml
from src.paws_train import main as paws
f... | 2,557 | 26.212766 | 79 | py |
suncet | suncet-main/snn_eval.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import logging
import pprint
from collections import OrderedDict
import numpy as np
import torch
impor... | 9,638 | 26.778098 | 93 | py |
suncet | suncet-main/src/losses.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from logging import getLogger
import torch
from src.utils import (
AllGather,
AllReduce
)
logger = getLogger()
... | 9,176 | 30.644828 | 95 | py |
suncet | suncet-main/src/fine_tune.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
# -- FOR DISTRIBUTED TRAINING ENSURE ONLY 1 DEVICE VISIBLE PER PROCESS
try:
# -- WARNING: IF DOING DISTRIB... | 12,924 | 30.91358 | 99 | py |
suncet | suncet-main/src/sgd.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
from torch.optim import Optimizer
class SGD(Optimizer):
def __init__(self, params, lr, momentum=0, we... | 1,963 | 31.733333 | 83 | py |
suncet | suncet-main/src/resnet.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import torch.nn as nn
__all__ = [
'resnet50',
'resnet50w2',
'resnet50w4',
'resnet101',
... | 9,919 | 28.436202 | 106 | py |
suncet | suncet-main/src/suncet_train.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
# -- FOR DISTRIBUTED TRAINING ENSURE ONLY 1 DEVICE VISIBLE PER PROCESS
try:
# -- WARNING: IF DOING DISTRIB... | 15,445 | 32.360691 | 90 | py |
suncet | suncet-main/src/utils.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import math
import torch
import torch.distributed as dist
from logging import getLogger
logger = getLogger()
... | 5,340 | 26.963351 | 111 | py |
suncet | suncet-main/src/wide_resnet.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import torch.nn as nn
import torch.nn.functional as F
__all__ = [
'wide_resnet28w2'
]
def conv3x3(in_... | 3,816 | 28.589147 | 98 | py |
suncet | suncet-main/src/snn_fine_tune.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
# -- FOR DISTRIBUTED TRAINING ENSURE ONLY 1 DEVICE VISIBLE PER PROCESS
try:
# -- WARNING: IF DOING DISTRIB... | 13,322 | 30.496454 | 115 | py |
suncet | suncet-main/src/data_manager.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import subprocess
import time
from logging import getLogger
import numpy as np
from math import ceil
import ... | 40,446 | 32.875209 | 146 | py |
suncet | suncet-main/src/lars.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
from src.utils import AverageMeter
class LARS(torch.optim.Optimizer):
def __init__(self, optimizer, ... | 2,651 | 29.136364 | 128 | py |
suncet | suncet-main/src/paws_train.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
# -- FOR DISTRIBUTED TRAINING ENSURE ONLY 1 DEVICE VISIBLE PER PROCESS
try:
# -- WARNING: IF DOING DISTRIB... | 17,280 | 33.770624 | 90 | py |
particleNN | particleNN-master/main.py | """
Main function for network training using GeoConv
"""
import torch
from torch import optim
from torch.utils.data import DataLoader
import os, argparse
import numpy as np
from model.GeoConvNet import GeoConvNet
from utils.process_data import collate_ball, collect_file, data_reader, PointData
from train import train
... | 5,514 | 43.837398 | 165 | py |
particleNN | particleNN-master/mean_shift.py | import os
import time
import numpy as np
import torch
from sklearn.decomposition import PCA
from thingking import loadtxt
from torch.utils.data import DataLoader
from model.GeoConvNet import GeoConvNet
from utils.simple import show
from utils.process_data import data_reader, collect_file,PointData, numpy_to_vtp, coll... | 10,925 | 34.245161 | 299 | py |
particleNN | particleNN-master/h_search.py | from utils.process_data import all_file_loader, PointData, collate_ball, collect_file
from torch.utils.data import DataLoader
import torch
import os, math, time
try:
data_path = os.environ['data']
except KeyError:
data_path = './data/'
def get_error (r,data):
eps = 1e-15
sample_size = 10000
batc... | 3,189 | 32.229167 | 120 | py |
particleNN | particleNN-master/train.py | """
Training helper functions
"""
import torch
from torch.nn import functional as F
from torch.utils.data import DataLoader
from utils.process_data import masked_mse_loss
def inference_latent(model,loader:DataLoader,lat_dim,ball,device,show=True):
model.eval()
cur_idx = 0
if ball:
n = sum([len(d[0... | 2,587 | 29.447059 | 83 | py |
particleNN | particleNN-master/utils/process_data.py | import random
import os
import math
from vtkmodules import all as vtk
from vtkmodules.util import numpy_support
import numpy as np
from matplotlib import pyplot as plt
from scipy.spatial import KDTree
from multiprocessing import Pool
import torch
from torch.utils.data import Dataset
from utils.sdf import SDFRead
from ... | 14,846 | 33.934118 | 299 | py |
particleNN | particleNN-master/model/GeoConvNet.py | from torch import nn
import torch
from torch.nn import functional as F
class GeoConvNet(nn.Module):
def __init__(self, lat_dim, input_dim, ball, neuron_num, r):
"""
input size: B * N * C
"""
super(GeoConvNet, self).__init__()
self.vector_length = lat_dim
self.num_cha... | 6,980 | 36.331551 | 97 | py |
multitask-question-generation | multitask-question-generation-main/data_collector.py | # -*- coding: utf-8 -*-
"""
Created on Thu Apr 1 14:09:15 2021
@author: ozcan
"""
from typing import Dict, List, Optional
import torch
def trim_batch(
input_ids, pad_token_id, attention_mask=None,
):
"""Remove columns that are populated exclusively by pad_token_id"""
keep_column_mask = input_ids.ne(pa... | 3,202 | 36.244186 | 150 | py |
multitask-question-generation | multitask-question-generation-main/prepare_data.py | # -*- coding: utf-8 -*-
"""
Created on Thu Apr 1 12:40:14 2021
@author: ozcan
"""
import os
import logging
from dataclasses import dataclass, field
from typing import Dict, List, Optional
import torch
import pandas as pd
from nlp import Dataset
from transformers import MT5Tokenizer, HfArgumentParser
from process_da... | 4,895 | 29.792453 | 96 | py |
multitask-question-generation | multitask-question-generation-main/pipelines.py | # -*- coding: utf-8 -*-
"""
Created on Fri Apr 2 16:19:31 2021
@author: ozcan
"""
import itertools
import logging
from typing import Optional, Dict, Union
from nltk import sent_tokenize
import nltk
nltk.download('punkt')
import torch
from transformers import(
AutoModelForSeq2SeqLM,
AutoTokenizer,
PreTr... | 8,237 | 31.690476 | 137 | py |
multitask-question-generation | multitask-question-generation-main/utils.py | # -*- coding: utf-8 -*-
"""
Created on Thu Apr 1 14:14:20 2021
@author: ozcan
"""
from typing import Callable, Dict, Iterable, List
from torch import nn
# these functions are taken from transformers repo
def grad_status(model: nn.Module) -> Iterable:
return (par.requires_grad for par in model.parameters())
def... | 1,950 | 33.839286 | 99 | py |
multitask-question-generation | multitask-question-generation-main/trainer.py | # -*- coding: utf-8 -*-
"""
Created on Thu Apr 1 14:12:48 2021
@author: ozcan
"""
from typing import Any, Dict, Union
import torch
from torch import nn
from transformers import Trainer as HFTrainer
from transformers.file_utils import is_apex_available
if is_apex_available():
from apex import amp
from utils i... | 2,039 | 31.380952 | 109 | py |
multitask-question-generation | multitask-question-generation-main/run_multi.py | # -*- coding: utf-8 -*-
"""
Created on Thu Apr 1 14:15:30 2021
@author: ozcan
"""
import dataclasses
import json
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Dict, List, Optional
import numpy as np
import torch
from transformers import (
AutoModelForSeq2SeqLM,... | 7,543 | 31.517241 | 133 | py |
usad | usad-master/utils.py | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import torch
from sklearn.metrics import roc_curve,roc_auc_score
def get_default_device():
"""Pick GPU if available, else CPU"""
if torch.cuda.is_available():
return torch.device('cuda')
else:
retu... | 2,094 | 28.507042 | 114 | py |
usad | usad-master/usad.py | import torch
import torch.nn as nn
from utils import *
device = get_default_device()
class Encoder(nn.Module):
def __init__(self, in_size, latent_size):
super().__init__()
self.linear1 = nn.Linear(in_size, int(in_size/2))
self.linear2 = nn.Linear(int(in_size/2), int(in_size/4))
self.linear3 = nn.Lin... | 4,149 | 34.470085 | 117 | py |
SCSS_CSGaussian | SCSS_CSGaussian-main/train_cyclo.py | import os
os.environ['PYTHONHASHSEED'] = '0'
import numpy as np
from tqdm import tqdm
import pickle
soi_type = 'CycloA'
interference_sig_type = 'CycloB'
import random
from src import unet_model as unet
import tensorflow as tf
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, LearningRateSchedule... | 4,551 | 36.00813 | 209 | py |
SCSS_CSGaussian | SCSS_CSGaussian-main/train_comm.py | import os
os.environ['PYTHONHASHSEED'] = '0'
import numpy as np
from tqdm import tqdm
import pickle
import rfcutils.srrcgaussian_helper_fn as srrcfn
import rfcutils.ofdmgaussian_helper_fn as ofdmfn
soi_type = 'RRCGaussian'
interference_sig_type = 'OFDMGaussian'
import random
from src import unet_model as unet
import ... | 3,897 | 38.77551 | 213 | py |
SCSS_CSGaussian | SCSS_CSGaussian-main/train_comm2.py | import os
os.environ['PYTHONHASHSEED'] = '0'
import numpy as np
from tqdm import tqdm
import pickle
import rfcutils.srrcgaussian_helper_fn as srrcfn
import rfcutils.ofdm2gaussian_helper_fn as ofdmfn
soi_type = 'RRCGaussian'
interference_sig_type = 'OFDMGaussian2'
import random
from src import unet_model as unet
impor... | 3,900 | 38.40404 | 213 | py |
SCSS_CSGaussian | SCSS_CSGaussian-main/src/unet_model.py | import tensorflow as tf
import tensorflow.keras as k
from tensorflow.keras import layers
from tensorflow.keras.models import Model
def get_unet_model0(input_shape, k_sz=3, long_k_sz=101, lr=0.0003, k_neurons = 16):
n_window = input_shape[0]
n_ch = 2
in0 = layers.Input(shape=input_shape)
x = ... | 2,087 | 34.389831 | 98 | py |
SCSS_CSGaussian | SCSS_CSGaussian-main/src/waveunet.py | import tensorflow as tf
import tensorflow.keras as k
from tensorflow.keras import layers
from tensorflow.keras.models import Model
def get_wunet_model(input_shape, k_sz=15, merge_k_sz=5, lr=0.0003, k_neurons=32, epsilon=None):
n_window = input_shape[0]
n_ch = 2
in0 = layers.Input(shape=input_shape)
x... | 1,887 | 36.76 | 98 | py |
transformers | transformers-main/conftest.py | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicabl... | 3,232 | 36.16092 | 113 | py |
transformers | transformers-main/setup.py | # Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicabl... | 15,948 | 33.447084 | 157 | py |
transformers | transformers-main/hubconf.py | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicabl... | 8,496 | 51.450617 | 189 | py |
transformers | transformers-main/examples/run_on_remote.py | #!/usr/bin/env python
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LI... | 3,649 | 49.694444 | 125 | py |
transformers | transformers-main/examples/research_projects/longform-qa/eli5_app.py | import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
embed_questions_for_retrieval,
make_qa_s2s_model,
qa_s2s_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers impor... | 13,474 | 37.28125 | 159 | py |
transformers | transformers-main/examples/research_projects/longform-qa/eli5_utils.py | import functools
import math
import os # noqa: F401
from random import choice, randint
from time import time
import datasets # noqa: F401
import faiss # noqa: F401
import numpy as np
import pandas as pd
import torch
import torch.utils.checkpoint as checkpoint
from elasticsearch import Elasticsearch # noqa: F401
fr... | 28,240 | 39.988389 | 119 | py |
transformers | transformers-main/examples/research_projects/codeparrot/scripts/codeparrot_training.py | import logging
import os
import time
from argparse import Namespace
from pathlib import Path
import datasets
import torch
from accelerate import Accelerator, DistributedType
from accelerate.utils import ProjectConfiguration
from arguments import TrainingArguments
from datasets import load_dataset
from huggingface_hub ... | 12,979 | 38.452888 | 116 | py |
transformers | transformers-main/examples/research_projects/codeparrot/scripts/validation_loss.py | import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set... | 3,496 | 33.97 | 114 | py |
transformers | transformers-main/examples/research_projects/codeparrot/scripts/human_eval.py | import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torc... | 9,004 | 38.323144 | 118 | py |
transformers | transformers-main/examples/research_projects/self-training-text-classification/selftraining.py | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicab... | 16,963 | 42.609254 | 119 | py |
transformers | transformers-main/examples/research_projects/self-training-text-classification/finetuning.py | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicab... | 34,604 | 41.616995 | 119 | py |
transformers | transformers-main/examples/research_projects/bertology/run_prune_gpt.py | #!/usr/bin/env python3
""" This script is adapted from the Bertology pruning code (https://github.com/huggingface/transformers/blob/783d7d2629e97c5f0c5f9ef01b8c66410275c204/examples/research_projects/bertology/run_bertology.py)
to prune GPT-like models. The author is @altsoph.
"""
import argparse
import logging
import... | 15,491 | 38.520408 | 204 | py |
transformers | transformers-main/examples/research_projects/bertology/run_bertology.py | #!/usr/bin/env python3
# Copyright 2018 CMU and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless requir... | 18,594 | 40.048565 | 118 | py |
transformers | transformers-main/examples/research_projects/rag/use_own_knowledge_dataset.py | import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import (
DPRCo... | 8,256 | 38.507177 | 144 | py |
transformers | transformers-main/examples/research_projects/rag/utils_rag.py | import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transfo... | 8,107 | 32.093878 | 118 | py |
transformers | transformers-main/examples/research_projects/rag/finetune_rag.py | """Finetuning script for RAG models. Adapted from examples.seq2seq.finetune.py"""
import argparse
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Any, Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
import... | 26,194 | 39.3 | 119 | py |
transformers | transformers-main/examples/research_projects/rag/distributed_pytorch_retriever.py | import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
logger = logging.getLogger(__name__)
class RagPyTorchDistributedRetriever(RagRetriever):
"""
A distributed retriever built on top of ... | 6,539 | 46.05036 | 155 | py |
transformers | transformers-main/examples/research_projects/rag/test_distributed_retriever.py | import json
import os
import shutil
import sys
import tempfile
import unittest
from unittest import TestCase
from unittest.mock import patch
import faiss
import numpy as np
from datasets import Dataset
from transformers import BartConfig, BartTokenizer, DPRConfig, DPRQuestionEncoderTokenizer, RagConfig
from transform... | 13,794 | 39.693215 | 118 | py |
transformers | transformers-main/examples/research_projects/rag/eval_rag.py | """ Evaluation script for RAG models."""
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as trans... | 11,211 | 33.928349 | 119 | py |
transformers | transformers-main/examples/research_projects/rag/lightning_base.py | import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
... | 15,639 | 37.617284 | 124 | py |
transformers | transformers-main/examples/research_projects/rag/callbacks_rag.py | import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
model_paramet... | 4,442 | 36.974359 | 116 | py |
transformers | transformers-main/examples/research_projects/rag/_test_finetune_rag.py | import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.b... | 3,954 | 34.3125 | 85 | py |
transformers | transformers-main/examples/research_projects/pplm/run_pplm.py | #! /usr/bin/env python3
# coding=utf-8
# Copyright (c) 2019 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless ... | 29,007 | 34.203883 | 182 | py |
transformers | transformers-main/examples/research_projects/pplm/run_pplm_discrim_train.py | #! /usr/bin/env python3
# coding=utf-8
# Copyright (c) 2019 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless ... | 18,797 | 34.874046 | 117 | py |
transformers | transformers-main/examples/research_projects/pplm/pplm_classification_head.py | from torch import nn
class ClassificationHead(nn.Module):
"""Classification Head for transformer encoders"""
def __init__(self, class_size, embed_size):
super().__init__()
self.class_size = class_size
self.embed_size = embed_size
# self.mlp1 = nn.Linear(embed_size, embed_size... | 651 | 31.6 | 68 | py |
transformers | transformers-main/examples/research_projects/deebert/test_glue_deebert.py | import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
parser = argparse... | 3,690 | 34.152381 | 109 | py |
transformers | transformers-main/examples/research_projects/deebert/run_glue_deebert.py | from __future__ import absolute_import, division, print_function
import argparse
import glob
import logging
import os
import random
import time
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from torch.utils.data.distribute... | 31,739 | 42.125 | 150 | py |
transformers | transformers-main/examples/research_projects/deebert/src/modeling_highway_bert.py | import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
... | 17,702 | 43.2575 | 172 | py |
transformers | transformers-main/examples/research_projects/deebert/src/modeling_highway_roberta.py | from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta... | 6,789 | 42.806452 | 172 | py |
transformers | transformers-main/examples/research_projects/layoutlmv3/run_funsd_cord.py | #!/usr/bin/env python
# coding=utf-8
# Copyright 2022 The HuggingFace Team All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-... | 21,185 | 38.674157 | 118 | py |
transformers | transformers-main/examples/research_projects/lxmert/modeling_frcnn.py | """
coding=utf-8
Copyright 2018, Antonio Mendoza Hao Tan, Mohit Bansal
Adapted From Facebook Inc, Detectron2 && Huggingface Co.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www... | 73,740 | 37.366805 | 152 | py |
transformers | transformers-main/examples/research_projects/lxmert/extracting_data.py | import getopt
import json
import os
# import numpy as np
import sys
from collections import OrderedDict
import datasets
import numpy as np
import torch
from modeling_frcnn import GeneralizedRCNN
from processing_image import Preprocess
from utils import Config
"""
USAGE:
``python extracting_data.py -i <img_dir> -o ... | 5,244 | 33.966667 | 109 | py |
transformers | transformers-main/examples/research_projects/lxmert/utils.py | """
coding=utf-8
Copyright 2018, Antonio Mendoza Hao Tan, Mohit Bansal, Huggingface team :)
Adapted From Facebook Inc, Detectron2
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://w... | 18,206 | 31.805405 | 122 | py |
transformers | transformers-main/examples/research_projects/lxmert/visualizing_image.py | """
coding=utf-8
Copyright 2018, Antonio Mendoza Hao Tan, Mohit Bansal
Adapted From Facebook Inc, Detectron2
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/license... | 13,420 | 25.842 | 100 | py |
transformers | transformers-main/examples/research_projects/lxmert/processing_image.py | """
coding=utf-8
Copyright 2018, Antonio Mendoza Hao Tan, Mohit Bansal
Adapted From Facebook Inc, Detectron2
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/license... | 5,747 | 37.066225 | 114 | py |
transformers | transformers-main/examples/research_projects/vqgan-clip/loaders.py | import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path, display=False):
config = OmegaConf.load(config_path)
if display:
print(yaml.dump(OmegaConf.to_container(config)))
return config
def load_vqgan(device, c... | 2,230 | 28.746667 | 108 | py |
transformers | transformers-main/examples/research_projects/vqgan-clip/VQGAN_CLIP.py | import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from uti... | 11,225 | 40.732342 | 136 | py |
transformers | transformers-main/examples/research_projects/vqgan-clip/img_processing.py | import numpy as np
import PIL
import torch
import torchvision.transforms as T
import torchvision.transforms.functional as TF
from PIL import Image
def preprocess(img, target_image_size=256):
s = min(img.size)
if s < target_image_size:
raise ValueError(f"min dim for image {s} < {target_image_size}")
... | 1,194 | 22.431373 | 72 | py |
transformers | transformers-main/examples/research_projects/vqgan-clip/utils.py | from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_module(module):
for param in module.parameters():
param.requires_grad = False
def get_device():
device = "cuda" if torch.cuda.is_available() else "cpu"
if torch.backends.mps.is_available() and torch.backends.m... | 969 | 25.944444 | 117 | py |
transformers | transformers-main/examples/research_projects/bertabs/modeling_bertabs.py | # MIT License
# Copyright (c) 2019 Yang Liu and the HuggingFace team
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, c... | 38,255 | 35.124646 | 114 | py |
transformers | transformers-main/examples/research_projects/bertabs/convert_bertabs_original_pytorch_checkpoint.py | # coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable... | 6,523 | 34.075269 | 117 | py |
transformers | transformers-main/examples/research_projects/bertabs/utils_summarization.py | import os
from collections import deque
import torch
from torch.utils.data import Dataset
# ------------
# Data loading
# ------------
class CNNDMDataset(Dataset):
"""Abstracts the dataset used to train seq2seq models.
The class will process the documents that are located in the specified
folder. The ... | 5,753 | 33.25 | 106 | py |
transformers | transformers-main/examples/research_projects/bertabs/test_utils_summarization.py | # coding=utf-8
# Copyright 2019 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or ag... | 4,419 | 43.646465 | 99 | py |
transformers | transformers-main/examples/research_projects/bertabs/run_summarization.py | #! /usr/bin/python3
import argparse
import logging
import os
import sys
from collections import namedtuple
import torch
from modeling_bertabs import BertAbs, build_predictor
from torch.utils.data import DataLoader, SequentialSampler
from tqdm import tqdm
from transformers import BertTokenizer
from .utils_summarizati... | 10,202 | 28.318966 | 115 | py |
transformers | transformers-main/examples/research_projects/fsner/setup.py | import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="fsner",
version="0.0.1",
author="msi sayef",
author_email="msi.sayef@gmail.com",
description="Few-shot Named Entity Recognition",
long_description=long_description,
... | 864 | 29.892857 | 97 | py |
transformers | transformers-main/examples/research_projects/fsner/src/fsner/tokenizer_utils.py | import torch
from transformers import AutoTokenizer
class FSNERTokenizerUtils(object):
def __init__(self, pretrained_model_name_or_path):
self.tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path)
def tokenize(self, x):
"""
Wrapper function for tokenizing query and... | 3,989 | 37.737864 | 182 | py |
transformers | transformers-main/examples/research_projects/fsner/src/fsner/model.py | import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
"""
The FSNER model implements a few-shot named entity recognition method from the paper `Example-Based Named Entity Recognition <https://arxiv.org/abs/2008.10570>`__ by
Morteza Ziyadi, Yuting Sun, Abhishek Goswami, Jade H... | 3,100 | 37.283951 | 169 | py |
transformers | transformers-main/examples/research_projects/adversarial/run_hans.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a cop... | 8,264 | 33.012346 | 117 | py |
transformers | transformers-main/examples/research_projects/adversarial/utils_hans.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a cop... | 11,760 | 33.591176 | 118 | py |
transformers | transformers-main/examples/research_projects/robust-speech-event/run_speech_recognition_ctc_streaming.py | #!/usr/bin/env python
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LI... | 28,211 | 40.488235 | 145 | py |
transformers | transformers-main/examples/research_projects/robust-speech-event/run_speech_recognition_ctc_bnb.py | #!/usr/bin/env python
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LI... | 31,562 | 39.413572 | 145 | py |
transformers | transformers-main/examples/research_projects/robust-speech-event/eval.py | #!/usr/bin/env python3
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset, args: Dict[str, str]):
"""DO NOT CHANGE. This function computes and logs t... | 4,711 | 33.394161 | 147 | py |
transformers | transformers-main/examples/research_projects/performer/run_mlm_performer.py | # coding=utf-8
# Copyright 2020 The HuggingFace Team All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless require... | 28,738 | 40.530347 | 119 | py |
transformers | transformers-main/examples/research_projects/performer/modeling_flax_performer.py | # coding=utf-8
# Copyright 2018 The Google Flax Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
... | 21,121 | 37.264493 | 120 | py |
transformers | transformers-main/examples/research_projects/performer/modeling_flax_performer_utils.py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicab... | 25,678 | 37.966616 | 119 | py |
transformers | transformers-main/examples/research_projects/information-gain-filtration/run_clm_igf.py | # Copyright 2022 - Intel Corp. All rights reserved.
# Authors: Mayank Kumar Raunak, Javier Turek, Nicole Beckage
"""
Implementation of a new method for fine-tuning transformer models that we call
Information Gain Filtration 'IGF' on WikiText data set and compared the results
with the standard fine-tuning method
Steps... | 15,491 | 33.735426 | 150 | py |