Each row pairs a repository and file path with the file's source code and simple length statistics; a query sketch over these columns follows the table.

| repo (string, 1–99 chars) | file (string, 13–215 chars) | code (string, 12–59.2M chars) | file_length (int64, 12–59.2M) | avg_line_length (float64, 3.82–1.48M) | max_line_length (int64, 12–2.51M) | extension_type (string, 1 class) |
|---|---|---|---|---|---|---|
VLI_SDRO | VLI_SDRO-main/Models/VILLA/model/ot.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
Wasserstein Distance (Optimal Transport)
"""
import torch
from torch.nn import functional as F
def cost_matrix_cosine(x, y, eps=1e-5):
""" Compute cosine distnace across every pairs of x, y (batched)
[B, L_x, D] [B, L_y, D] -> [B, Lx, L... | 2,866 | 32.337209 | 74 | py |
VLI_SDRO | VLI_SDRO-main/Models/VILLA/model/attention.py | """
Multi-head attention code copied from PyTorch
(https://github.com/pytorch/pytorch).
"""
import warnings
import torch
from torch.nn import Module, Parameter, Linear
from torch.nn.init import xavier_normal_, xavier_uniform_, constant_
from torch.nn.functional import linear, softmax, dropout
def multi_head_attention_... | 19,463 | 47.297767 | 130 | py |
VLI_SDRO | VLI_SDRO-main/Models/VILLA/model/itm.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
UNITER for ITM model
"""
from collections import defaultdict
import torch
from torch import nn
from .model import UniterPreTrainedModel, UniterModel
class UniterForImageTextRetrieval(UniterPreTrainedModel):
""" Finetune UNITER for image te... | 5,619 | 39.142857 | 79 | py |
VLI_SDRO | VLI_SDRO-main/Models/UNITER/train_nlvr2.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
UNITER finetuning for NLVR2
"""
import argparse
import os
from os.path import exists, join
from time import time
import torch
from torch.nn import functional as F
from torch.nn.utils import clip_grad_norm_
from torch.utils.data import DataLoader... | 17,624 | 41.46988 | 79 | py |
VLI_SDRO | VLI_SDRO-main/Models/UNITER/pretrain.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
UNITER pre-training
"""
import argparse
from collections import defaultdict
import json
import math
import os
from os.path import exists, join
from time import time
import torch
from torch.utils.data import DataLoader
from torch.nn import functi... | 25,780 | 39.094868 | 79 | py |
VLI_SDRO | VLI_SDRO-main/Models/UNITER/train_itm.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
UNITER finetuning for Image-Text Retrieval
"""
import argparse
import os
from os.path import exists, join
from time import time
import torch
from torch.nn.utils import clip_grad_norm_
from torch.utils.data import DataLoader, ConcatDataset
from a... | 17,930 | 42.627737 | 79 | py |
VLI_SDRO | VLI_SDRO-main/Models/UNITER/inf_vcr.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
run inference of VCR for submission
"""
import argparse
import json
import os
from os.path import exists
import pandas as pd
from time import time
import torch
from torch.nn import functional as F
from torch.utils.data import DataLoader
from ap... | 10,802 | 36.905263 | 78 | py |
VLI_SDRO | VLI_SDRO-main/Models/UNITER/train_nlvr2_dataaug.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
UNITER finetuning for NLVR2
"""
import argparse
import os
from os.path import exists, join
from time import time
import torch
from torch.nn import functional as F
from torch.nn.utils import clip_grad_norm_
from torch.utils.data import DataLoader... | 19,039 | 42.272727 | 83 | py |
VLI_SDRO | VLI_SDRO-main/Models/UNITER/train_vcr.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
UNITER finetuning for VCR
"""
import argparse
import json
import os
from os.path import exists, join
from time import time
import torch
from torch.nn import functional as F
from torch.nn.utils import clip_grad_norm_
from torch.utils.data import ... | 20,770 | 41.131846 | 79 | py |
VLI_SDRO | VLI_SDRO-main/Models/UNITER/inf_vqa.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
run inference of VQA for submission
"""
import argparse
import json
import os
from os.path import exists
from time import time
import torch
from torch.utils.data import DataLoader
from apex import amp
from horovod import torch as hvd
import num... | 7,305 | 36.27551 | 79 | py |
VLI_SDRO | VLI_SDRO-main/Models/UNITER/inf_nlvr2.py | """run inference of NLVR2 (single GPU only)"""
import argparse
import json
import os
from os.path import exists
from time import time
import numpy as np
from torch.nn import functional as F
import torch
from torch.utils.data import DataLoader
from apex import amp
from horovod import torch as hvd
from tqdm import tqdm ... | 6,237 | 37.745342 | 77 | py |
VLI_SDRO | VLI_SDRO-main/Models/UNITER/pretrain_vcr.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
UNITER pre-training
"""
import argparse
from collections import defaultdict
import json
import os
from os.path import exists, join
from time import time
import torch
from torch.utils.data import DataLoader
from torch.nn import functional as F
fr... | 22,741 | 39.538324 | 79 | py |
VLI_SDRO | VLI_SDRO-main/Models/UNITER/inf_itm.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
run inference for Image Text Retrieval
"""
import argparse
import json
import os
from os.path import exists
import pickle
from time import time
import torch
from torch.utils.data import DataLoader
from apex import amp
from horovod import torch ... | 6,413 | 38.109756 | 79 | py |
VLI_SDRO | VLI_SDRO-main/Models/UNITER/train_vqa.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
UNITER finetuning for VQA
"""
import argparse
import json
import os
from os.path import abspath, dirname, exists, join
from time import time
import torch
from torch.nn import functional as F
from torch.nn.utils import clip_grad_norm_
from torch.... | 16,988 | 41.261194 | 79 | py |
VLI_SDRO | VLI_SDRO-main/Models/UNITER/train_nlvr2_stat.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
UNITER finetuning for NLVR2
"""
import argparse
import os
from os.path import exists, join
from time import time
import torch
from torch.nn import functional as F
from torch.nn.utils import clip_grad_norm_
from torch.utils.data import DataLoader... | 23,501 | 41.345946 | 106 | py |
VLI_SDRO | VLI_SDRO-main/Models/UNITER/train_ve.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
UNITER finetuning for SNLI-VE
"""
import argparse
import json
import os
from os.path import exists, join
import pickle
from time import time
import torch
from torch.nn import functional as F
from torch.nn.utils import clip_grad_norm_
from torch.... | 16,875 | 41.724051 | 79 | py |
VLI_SDRO | VLI_SDRO-main/Models/UNITER/train_itm_hard_negatives.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
UNITER finetuning for Image-Text Retrieval with hard negatives
"""
import argparse
import os
from os.path import exists, join
from time import time
import torch
from torch.nn.utils import clip_grad_norm_
from torch.utils.data import DataLoader, ... | 19,146 | 42.417234 | 79 | py |
VLI_SDRO | VLI_SDRO-main/Models/UNITER/train_vqa_stat.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
UNITER finetuning for VQA
"""
import argparse
import json
import os
from os.path import abspath, dirname, exists, join
from time import time
import torch
from torch.nn import functional as F
from torch.nn.utils import clip_grad_norm_
from torch.... | 22,792 | 40.366606 | 103 | py |
VLI_SDRO | VLI_SDRO-main/Models/UNITER/optim/misc.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
Misc lr helper
"""
from torch.optim import Adam, Adamax
from .adamw import AdamW
def build_optimizer(model, opts):
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer... | 1,037 | 27.833333 | 65 | py |
VLI_SDRO | VLI_SDRO-main/Models/UNITER/optim/adamw.py | """
AdamW optimizer (weight decay fix)
copied from huggingface (https://github.com/huggingface/transformers).
"""
import math
import torch
from torch.optim import Optimizer
class AdamW(Optimizer):
""" Implements Adam algorithm with weight decay fix.
Parameters:
lr (float): learning rate. Default 1e-3.... | 4,450 | 41.798077 | 79 | py |
VLI_SDRO | VLI_SDRO-main/Models/UNITER/scripts/convert_ckpt.py | import sys
from collections import OrderedDict
import torch
bert_ckpt, output_ckpt = sys.argv[1:]
bert = torch.load(bert_ckpt)
uniter = OrderedDict()
for k, v in bert.items():
uniter[k.replace('bert', 'uniter')] = v
torch.save(uniter, output_ckpt)
| 256 | 17.357143 | 43 | py |
VLI_SDRO | VLI_SDRO-main/Models/UNITER/Preprocessing/prepro.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
preprocess NLVR annotations into LMDB
"""
import argparse
import json
import os
from os.path import exists
from cytoolz import curry
from tqdm import tqdm
from pytorch_pretrained_bert import BertTokenizer
from data.data import open_lmdb
@curr... | 3,428 | 31.971154 | 78 | py |
VLI_SDRO | VLI_SDRO-main/Models/UNITER/Preprocessing/prepro_vqa.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
preprocess VQA annotations into LMDB
"""
import argparse
import json
import random
import os
from os.path import exists
from cytoolz import curry
from tqdm import tqdm
from pytorch_pretrained_bert import BertTokenizer
from data.data import ope... | 4,245 | 31.412214 | 98 | py |
VLI_SDRO | VLI_SDRO-main/Models/UNITER/Preprocessing/prepro_nlvr2.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
preprocess NLVR annotations into LMDB
"""
import argparse
import json
import os
from os.path import exists
import random
from cytoolz import curry
from tqdm import tqdm
from pytorch_pretrained_bert import BertTokenizer
from data.data_sisp impor... | 4,074 | 30.835938 | 89 | py |
VLI_SDRO | VLI_SDRO-main/Models/UNITER/utils/misc.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
Misc utilities
"""
import json
import random
import sys
import torch
import numpy as np
from utils.logger import LOGGER
class NoOp(object):
""" useful for distributed training No-Ops """
def __getattr__(self, name):
return sel... | 1,507 | 20.239437 | 70 | py |
VLI_SDRO | VLI_SDRO-main/Models/UNITER/utils/save.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
saving utilities
"""
import json
import os
from os.path import abspath, dirname, exists, join
import subprocess
import torch
from utils.logger import LOGGER
def save_training_meta(args):
if args.rank > 0:
return
if not exists... | 2,734 | 35.959459 | 73 | py |
VLI_SDRO | VLI_SDRO-main/Models/UNITER/utils/itm_eval.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
Image Text Retrieval evaluation helper
"""
from time import time
import torch
from horovod import torch as hvd
from tqdm import tqdm
from .logger import LOGGER
from .misc import NoOp
from .distributed import all_gather_list
@torch.no_grad()
d... | 3,661 | 30.843478 | 72 | py |
VLI_SDRO | VLI_SDRO-main/Models/UNITER/utils/distributed.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
distributed API using Horovod
Modified from OpenNMT's native pytorch distributed utils
(https://github.com/OpenNMT/OpenNMT-py)
"""
import math
import pickle
import torch
from horovod import torch as hvd
def all_reduce_and_rescale_tensors(tenso... | 6,296 | 28.985714 | 77 | py |
VLI_SDRO | VLI_SDRO-main/Models/UNITER/data/vcr.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
VCR dataset
"""
import copy
import json
import torch
from torch.nn.utils.rnn import pad_sequence
from toolz.sandbox import unzip
from cytoolz import concat
from .data import (DetectFeatTxtTokDataset, TxtTokLmdb, DetectFeatLmdb,
... | 11,643 | 37.556291 | 82 | py |
VLI_SDRO | VLI_SDRO-main/Models/UNITER/data/mlm.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
MLM datasets
"""
import random
import torch
from torch.nn.utils.rnn import pad_sequence
from toolz.sandbox import unzip
from .data import (DetectFeatTxtTokDataset, TxtTokLmdb,
pad_tensors, get_gather_index)
def random_word(... | 4,551 | 32.226277 | 79 | py |
VLI_SDRO | VLI_SDRO-main/Models/UNITER/data/sampler.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
sampler for length bucketing (batch by tokens)
"""
import random
from torch.utils.data import Sampler
from cytoolz import partition_all
class TokenBucketSampler(Sampler):
def __init__(self, lens, bucket_size, batch_size,
d... | 2,031 | 33.440678 | 75 | py |
VLI_SDRO | VLI_SDRO-main/Models/UNITER/data/vqa_stat.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
VQA dataset
"""
import torch
from torch.nn.utils.rnn import pad_sequence
from toolz.sandbox import unzip
import json
import random
import copy
from os.path import abspath, dirname, exists, join
from .data_stat import DetectFeatTxtTokDataset, pad_... | 8,420 | 33.512295 | 139 | py |
VLI_SDRO | VLI_SDRO-main/Models/UNITER/data/mrm.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
MRM Datasets
"""
import random
import torch
from torch.nn.utils.rnn import pad_sequence
from toolz.sandbox import unzip
from .data import DetectFeatTxtTokDataset, pad_tensors, get_gather_index
def _get_img_mask(mask_prob, num_bb):
img_mask... | 7,228 | 34.965174 | 79 | py |
VLI_SDRO | VLI_SDRO-main/Models/UNITER/data/vqa.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
VQA dataset
"""
import torch
from torch.nn.utils.rnn import pad_sequence
from toolz.sandbox import unzip
import json
from os.path import abspath, dirname, exists, join
from .data import DetectFeatTxtTokDataset, pad_tensors, get_gather_index
ans2... | 4,664 | 31.172414 | 78 | py |
VLI_SDRO | VLI_SDRO-main/Models/UNITER/data/data.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
Dataset interfaces
"""
from collections import defaultdict
from contextlib import contextmanager
import io
import json
from os.path import exists
import numpy as np
import torch
from torch.utils.data import Dataset, ConcatDataset
import horovod.... | 11,447 | 31.157303 | 78 | py |
VLI_SDRO | VLI_SDRO-main/Models/UNITER/data/data_stat.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
Dataset interfaces
"""
from collections import defaultdict
from contextlib import contextmanager
import io
import json
from os.path import exists
import numpy as np
import torch
from torch.utils.data import Dataset, ConcatDataset
import horovod.... | 12,314 | 31.070313 | 78 | py |
VLI_SDRO | VLI_SDRO-main/Models/UNITER/data/pretrain_vcr.py | from .vcr import VcrDetectFeatTxtTokDataset
from .mlm import random_word
import torch
from toolz.sandbox import unzip
from torch.nn.utils.rnn import pad_sequence
from .data import pad_tensors, get_gather_index
from .mrm import (
_get_img_tgt_mask, _get_img_mask, _mask_img_feat,
_get_feat_target, _get_targets)
... | 9,933 | 35.255474 | 77 | py |
VLI_SDRO | VLI_SDRO-main/Models/UNITER/data/nlvr2.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
NLVR2 dataset
"""
import copy
import torch
from torch.nn.utils.rnn import pad_sequence
from toolz.sandbox import unzip
from cytoolz import concat
from .data import (DetectFeatTxtTokDataset, TxtTokLmdb, DetectFeatLmdb,
get_ids... | 7,831 | 32.046414 | 76 | py |
VLI_SDRO | VLI_SDRO-main/Models/UNITER/data/loader.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
A prefetch loader to speed up data loading
Modified from Nvidia Deep Learning Examples
(https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch).
"""
import random
import torch
from torch.utils.data import DataLoader
from utils.distri... | 4,747 | 32.202797 | 79 | py |
VLI_SDRO | VLI_SDRO-main/Models/UNITER/data/nlvr2_stat.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
NLVR2 dataset
"""
import copy
import torch
import random
from torch.nn.utils.rnn import pad_sequence
from toolz.sandbox import unzip
from cytoolz import concat
from .data import (DetectFeatTxtTokDataset, TxtTokLmdb, DetectFeatLmdb,
... | 9,626 | 34.263736 | 141 | py |
VLI_SDRO | VLI_SDRO-main/Models/UNITER/data/itm.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
Itm dataset
"""
from collections import defaultdict
import copy
import random
import torch
from torch.nn.utils.rnn import pad_sequence
from toolz.sandbox import unzip
from cytoolz import concat
import numpy as np
from .data import (DetectFeatTx... | 16,959 | 35.162047 | 79 | py |
VLI_SDRO | VLI_SDRO-main/Models/UNITER/data/data_sisp.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
Dataset interfaces
"""
from collections import defaultdict
from contextlib import contextmanager
import io
import json
from os.path import exists
import numpy as np
import torch
from torch.utils.data import Dataset, ConcatDataset
# import horovo... | 10,030 | 31.047923 | 78 | py |
VLI_SDRO | VLI_SDRO-main/Models/UNITER/model/vcr.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
Uniter for VCR model
"""
from collections import defaultdict
from torch import nn
from torch.nn import functional as F
from apex.normalization.fused_layer_norm import FusedLayerNorm as LayerNorm
# from .layer import GELU
from .model import (
... | 3,024 | 37.782051 | 80 | py |
VLI_SDRO | VLI_SDRO-main/Models/UNITER/model/pretrain.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
UNITER for pretraining
"""
from collections import defaultdict
import torch
from torch import nn
from torch.nn import functional as F
from apex.normalization.fused_layer_norm import FusedLayerNorm as LayerNorm
from .layer import GELU, BertOnlyM... | 10,155 | 43.156522 | 78 | py |
VLI_SDRO | VLI_SDRO-main/Models/UNITER/model/layer.py | """
BERT layers from the huggingface implementation
(https://github.com/huggingface/transformers)
"""
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the... | 9,378 | 39.081197 | 104 | py |
VLI_SDRO | VLI_SDRO-main/Models/UNITER/model/model.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
Pytorch modules
some classes are modified from HuggingFace
(https://github.com/huggingface/transformers)
"""
import copy
import json
import logging
from io import open
import torch
from torch import nn
from apex.normalization.fused_layer_norm im... | 15,887 | 42.173913 | 79 | py |
VLI_SDRO | VLI_SDRO-main/Models/UNITER/model/vqa.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
Uniter for VQA model
"""
from collections import defaultdict
from torch import nn
from torch.nn import functional as F
from apex.normalization.fused_layer_norm import FusedLayerNorm as LayerNorm
from .layer import GELU
from .model import Uniter... | 1,860 | 34.113208 | 75 | py |
VLI_SDRO | VLI_SDRO-main/Models/UNITER/model/pretrain_vcr.py | from .pretrain import UniterForPretraining
from torch import nn
from .layer import BertOnlyMLMHead
from collections import defaultdict
from torch.nn import functional as F
import torch
class UniterForPretrainingForVCR(UniterForPretraining):
""" 2nd Stage Pretrain UNITER for VCR
"""
def init_type_embedding... | 7,123 | 46.493333 | 80 | py |
VLI_SDRO | VLI_SDRO-main/Models/UNITER/model/nlvr2.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
Uniter for NLVR2 model
"""
from collections import defaultdict
import torch
from torch import nn
from torch.nn import functional as F
from .model import UniterPreTrainedModel, UniterModel
from .attention import MultiheadAttention
class Uniter... | 8,505 | 40.492683 | 76 | py |
VLI_SDRO | VLI_SDRO-main/Models/UNITER/model/ot.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
Wasserstein Distance (Optimal Transport)
"""
import torch
from torch.nn import functional as F
def cost_matrix_cosine(x, y, eps=1e-5):
""" Compute cosine distnace across every pairs of x, y (batched)
[B, L_x, D] [B, L_y, D] -> [B, Lx, L... | 2,866 | 32.337209 | 74 | py |
VLI_SDRO | VLI_SDRO-main/Models/UNITER/model/attention.py | """
Multi-head attention code copied from PyTorch
(https://github.com/pytorch/pytorch).
"""
import warnings
import torch
from torch.nn import Module, Parameter, Linear
from torch.nn.init import xavier_normal_, xavier_uniform_, constant_
from torch.nn.functional import linear, softmax, dropout
def multi_head_attention_... | 19,463 | 47.297767 | 130 | py |
VLI_SDRO | VLI_SDRO-main/Models/UNITER/model/itm.py | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
UNITER for ITM model
"""
from collections import defaultdict
import torch
from torch import nn
from .model import UniterPreTrainedModel, UniterModel
class UniterForImageTextRetrieval(UniterPreTrainedModel):
""" Finetune UNITER for image te... | 5,619 | 39.142857 | 79 | py |
VLI_SDRO | VLI_SDRO-main/SISP Transformations/utils.py | import math
import random
import numpy as np
import tensorflow as tf
import pickle
import spacy
import json
from nltk.corpus import wordnet
from word2number import w2n
from num2words import num2words
from keras.preprocessing.sequence import pad_sequences
from nltk.corpus import wordnet as wn
nouns = {x.name().split('... | 4,497 | 28.019355 | 199 | py |
VLI_SDRO | VLI_SDRO-main/SISP Transformations/get_noun_tokens.py | import json
# import pytorch_transformers
import nltk
import logging
import spacy
import lemminflect
import yaml
from tqdm import tqdm
file = open('config.yaml', 'r')
cfg = yaml.load(file, Loader=yaml.FullLoader)
nltk.download('punkt')
nlp = spacy.load('en_core_web_lg')
def stem(a):
# logging.info("Stemming {}".f... | 5,961 | 30.378947 | 92 | py |
VLI_SDRO | VLI_SDRO-main/SISP Transformations/FairSeqNmt/nmt.py |
import torch
from fairseq.models.transformer import TransformerModel
class MY_NMT:
def __init__(self):
self.en2de = torch.hub.load('pytorch/fairseq', 'transformer.wmt19.en-ru.single_model', tokenizer='moses', bpe='fastbpe')
self.en2de.cuda()
self.de2en = torch.hub.load('pytorch/fairseq', '... | 513 | 41.833333 | 128 | py |
VLI_SDRO | VLI_SDRO-main/SISP Transformations/FairSeqNmt/model.py | import torch
import json
import logging
logging.getLogger().setLevel(logging.INFO)
from fairseq.models.transformer import TransformerModel
en2de = torch.hub.load('pytorch/fairseq', 'transformer.wmt19.en-de.single_model', tokenizer='moses', bpe='fastbpe')
en2de.cuda()
de2en = torch.hub.load('pytorch/fairseq', 'transfor... | 1,002 | 37.576923 | 115 | py |
VLI_SDRO | VLI_SDRO-main/SISP Transformations/FairSeqNmt/model_tvqa.py | import torch
import json
import logging
logging.getLogger().setLevel(logging.INFO)
from fairseq.models.transformer import TransformerModel
en2de = torch.hub.load('pytorch/fairseq', 'transformer.wmt19.en-de.single_model', tokenizer='moses', bpe='fastbpe')
en2de.cuda()
de2en = torch.hub.load('pytorch/fairseq', 'transform... | 861 | 36.478261 | 115 | py |
VLI_SDRO | VLI_SDRO-main/SISP Transformations/FairSeqNmt/model_violin.py | import torch
import json
import logging
logging.getLogger().setLevel(logging.INFO)
from fairseq.models.transformer import TransformerModel
en2de = torch.hub.load('pytorch/fairseq', 'transformer.wmt19.en-de.single_model', tokenizer='moses', bpe='fastbpe')
en2de.cuda()
de2en = torch.hub.load('pytorch/fairseq', 'transform... | 1,143 | 39.857143 | 115 | py |
VLI_SDRO | VLI_SDRO-main/SISP Transformations/FairSeqNmt/model_nlvr2.py |
import torch
import json
import logging
logging.getLogger().setLevel(logging.INFO)
from fairseq.models.transformer import TransformerModel
en2de = torch.hub.load('pytorch/fairseq', 'transformer.wmt19.en-de.single_model', tokenizer='moses', bpe='fastbpe')
# en2de.cuda()
de2en = torch.hub.load('pytorch/fairseq', 'transf... | 1,018 | 35.392857 | 115 | py |
VLI_SDRO | VLI_SDRO-main/SISP Transformations/FairSeqNmt/model_test.py | import torch
import json
import logging
logging.getLogger().setLevel(logging.INFO)
from fairseq.models.transformer import TransformerModel
# Round-trip translations between English and German:
en2de = torch.hub.load('pytorch/fairseq', 'transformer.wmt19.en-de.single_model', tokenizer='moses', bpe='fastbpe')
en2de.cuda(... | 1,074 | 42 | 115 | py |
extremeText | extremeText-master/python/doc/examples/FastTextEmbeddingBag.py | #!/usr/bin/env python
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
#... | 2,920 | 34.192771 | 90 | py |
alpaqa | alpaqa-main/sphinx/source/conf.py | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup ------------------------------------------------------------... | 5,667 | 29.637838 | 79 | py |
EndoNeRF | EndoNeRF-master/eval_rgb.py |
import os, sys
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import math
from math import exp
import configargparse
import random, time
import imageio
import lpips
'''
SSIM utils
'''
def gaussian(window_size, sigma):
gauss = torch.Tenso... | 6,829 | 31.216981 | 122 | py |
EndoNeRF | EndoNeRF-master/endo_pc_reconstruction.py | from run_endonerf import config_parser, create_nerf
import torch
# from load_blender import pose_spherical
from run_endonerf import render_path
from run_endonerf_helpers import to8b
import numpy as np
import matplotlib.pyplot as plt
# import mcubes
# import trimesh
import os
import configargparse
import open3d as o3d
i... | 6,790 | 37.585227 | 152 | py |
EndoNeRF | EndoNeRF-master/load_blender.py | import os
import torch
import numpy as np
import imageio
import json
import torch.nn.functional as F
import cv2
trans_t = lambda t : torch.Tensor([
[1,0,0,0],
[0,1,0,0],
[0,0,1,t],
[0,0,0,1]]).float()
rot_phi = lambda phi : torch.Tensor([
[1,0,0,0],
[0,np.cos(phi),-np.sin(phi),0],
[0,np.... | 4,467 | 29.813793 | 119 | py |
EndoNeRF | EndoNeRF-master/run_endonerf.py | import os
import imageio
import time
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm, trange
from run_endonerf_helpers import *
from load_blender import load_blender_data
from load_llff import load_llff_data
try:
from apex import amp
except ImportError:
pass
device = torch.device("... | 58,833 | 44.892356 | 183 | py |
EndoNeRF | EndoNeRF-master/load_llff.py | import numpy as np
import torch
import os, imageio
########## Slightly modified version of LLFF data loading code
########## see https://github.com/Fyusion/LLFF for original
########## Adapted to DaVinci endoscopic surgery datasets
def _minify(basedir, factors=[], dir_name='images', resolutions=[]):
needtoloa... | 15,642 | 32.713362 | 182 | py |
EndoNeRF | EndoNeRF-master/run_endonerf_helpers.py | from traceback import print_stack
import torch
torch.autograd.set_detect_anomaly(True)
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from torchsearchsorted import searchsorted
# Misc
img2mse = lambda x, y : torch.mean((x - y) ** 2)
mse2psnr = lambda x : -10. * torch.log(x) / torch.log(torch... | 20,188 | 36.878049 | 139 | py |
EndoNeRF | EndoNeRF-master/torchsearchsorted/setup.py | from setuptools import setup, find_packages
from torch.utils.cpp_extension import BuildExtension, CUDA_HOME
from torch.utils.cpp_extension import CppExtension, CUDAExtension
# In any case, include the CPU version
modules = [
CppExtension('torchsearchsorted.cpu',
['src/cpu/searchsorted_cpu_wrapper.... | 1,121 | 25.714286 | 65 | py |
EndoNeRF | EndoNeRF-master/torchsearchsorted/examples/test.py | import torch
from torchsearchsorted import searchsorted, numpy_searchsorted
import time
if __name__ == '__main__':
# defining the number of tests
ntests = 2
# defining the problem dimensions
nrows_a = 50000
nrows_v = 50000
nsorted_values = 300
nvalues = 1000
# defines the variables. T... | 2,436 | 35.373134 | 80 | py |
EndoNeRF | EndoNeRF-master/torchsearchsorted/examples/benchmark.py | import timeit
import torch
import numpy as np
from torchsearchsorted import searchsorted, numpy_searchsorted
B = 5_000
A = 300
V = 100
repeats = 20
number = 100
print(
f'Benchmark searchsorted:',
f'- a [{B} x {A}]',
f'- v [{B} x {V}]',
f'- reporting fastest time of {repeats} runs',
f'- each run ... | 1,700 | 22.625 | 65 | py |
EndoNeRF | EndoNeRF-master/torchsearchsorted/src/torchsearchsorted/searchsorted.py | from typing import Optional
import torch
# trying to import the CPU searchsorted
SEARCHSORTED_CPU_AVAILABLE = True
try:
from torchsearchsorted.cpu import searchsorted_cpu_wrapper
except ImportError:
SEARCHSORTED_CPU_AVAILABLE = False
# trying to import the CUDA searchsorted
SEARCHSORTED_GPU_AVAILABLE = True
... | 2,072 | 37.388889 | 82 | py |
EndoNeRF | EndoNeRF-master/torchsearchsorted/src/torchsearchsorted/utils.py | import numpy as np
def numpy_searchsorted(a: np.ndarray, v: np.ndarray, side='left'):
"""Numpy version of searchsorted that works batch-wise on pytorch tensors
"""
nrows_a = a.shape[0]
(nrows_v, ncols_v) = v.shape
nrows_out = max(nrows_a, nrows_v)
out = np.empty((nrows_out, ncols_v), dtype=np.... | 530 | 32.1875 | 77 | py |
EndoNeRF | EndoNeRF-master/torchsearchsorted/test/test_searchsorted.py | import pytest
import torch
import numpy as np
from torchsearchsorted import searchsorted, numpy_searchsorted
from itertools import product, repeat
def test_searchsorted_output_dtype(device):
B = 100
A = 50
V = 12
a = torch.sort(torch.rand(B, V, device=device), dim=1)[0]
v = torch.rand(B, A, devi... | 1,420 | 30.577778 | 91 | py |
EndoNeRF | EndoNeRF-master/torchsearchsorted/test/conftest.py | import pytest
import torch
devices = {'cpu': torch.device('cpu')}
if torch.cuda.is_available():
devices['cuda'] = torch.device('cuda:0')
@pytest.fixture(params=devices.values(), ids=devices.keys())
def device(request):
return request.param
| 251 | 20 | 60 | py |
Hierarchy-CLIP | Hierarchy-CLIP-main/hierarcy_clip.py | # -*- coding: utf-8 -*-
"""Hierarcy_Clip.ipynb
Automatically generated by Colaboratory.
Original file (Hierarcy_Clip.ipynb) is located in the same repo.
# CVPR2023 Hierarchy-CLIP: Improving Zero-shot Generalization and Robustness of Multi-modal Models
*Licensed under the Apache License, Version 2.0.*
This is a co... | 41,304 | 63.338006 | 14,838 | py |
S4A | S4A-main/patches_dataset.py | import numpy as np
import pandas as pd
from datetime import datetime
from typing import Any, Tuple, Union
from tqdm import tqdm
import xarray as xr
from pycocotools.coco import COCO
import netCDF4
from torch.utils.data import Dataset
from pathlib import Path
import pytorch_lightning as pl
from utils.config import RA... | 21,864 | 41.129094 | 130 | py |
CCKS2022-track2-solution | CCKS2022-track2-solution-master/src/val.py | import torch
import torch.distributed as dist
from src.data import create_dataset_dataloader
from src.models import create_model
from src.train import parse_options
def main():
opt, args = parse_options()
seed = opt["manual_seed"]
# create train, validation, test datasets and dataloaders
for phase i... | 1,119 | 28.473684 | 76 | py |
CCKS2022-track2-solution | CCKS2022-track2-solution-master/src/test.py | import torch
import torch.distributed as dist
from src.data import create_dataset_dataloader
from src.models import create_model
from src.train import parse_options
def main():
opt, args = parse_options()
seed = opt["manual_seed"]
# create train, validation, test datasets and dataloaders
for phase i... | 1,121 | 28.526316 | 76 | py |
CCKS2022-track2-solution | CCKS2022-track2-solution-master/src/train.py | import argparse
import datetime
import logging
import math
import time
from os import path as osp
import torch
import torch.distributed as dist
from src.data import create_dataset_dataloader
from src.models import create_model
from src.utils import (
MessageLogger,
dict2str,
get_env_info,
get_root_log... | 6,166 | 30.953368 | 87 | py |
CCKS2022-track2-solution | CCKS2022-track2-solution-master/src/models/lr_scheduler.py | import math
from collections import Counter
from torch.optim.lr_scheduler import _LRScheduler
class MultiStepRestartLR(_LRScheduler):
"""MultiStep with restarts learning rate scheme.
Args:
optimizer (torch.nn.optimizer): Torch optimizer.
milestones (list): Iterations that will decrease learn... | 4,268 | 32.880952 | 85 | py |
CCKS2022-track2-solution | CCKS2022-track2-solution-master/src/models/base_model.py | import logging
import os
from collections import OrderedDict
from copy import deepcopy
import torch
from torch.nn.parallel import DataParallel, DistributedDataParallel
from src.models import lr_scheduler as lr_scheduler
from src.utils import master_only
logger = logging.getLogger("src")
class BaseModel:
"""Bas... | 11,860 | 35.161585 | 88 | py |
CCKS2022-track2-solution | CCKS2022-track2-solution-master/src/models/bce_model.py | import json
import os
from collections import OrderedDict
from copy import deepcopy
import torch
import torch.distributed as dist
import torch.nn.functional as F
import torchvision.transforms as T
from tqdm import tqdm
from src.archs import define_network
from src.metrics import cal_metric
from src.models.base_model ... | 7,963 | 33.626087 | 87 | py |
CCKS2022-track2-solution | CCKS2022-track2-solution-master/src/metrics/f1_score.py | import numpy as np
import torch
from sklearn.metrics import precision_recall_curve
def cal_metric(logits, labels):
preds = torch.argmax(logits, dim=1) # logits: (b, 2)
# preds = (logits > 0).long() # logits: (b)
TP = ((labels == 1) & (preds == 1)).sum().item()
FN = ((labels == 1) & (preds == 0)... | 1,139 | 29.810811 | 76 | py |
CCKS2022-track2-solution | CCKS2022-track2-solution-master/src/utils/misc.py | import os
import random
import time
from os import path as osp
import numpy as np
import torch
from .dist_util import master_only
from .logger import get_root_logger
def set_random_seed(seed):
"""Set random seeds."""
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manua... | 4,256 | 30.072993 | 87 | py |
CCKS2022-track2-solution | CCKS2022-track2-solution-master/src/utils/logger.py | import datetime
import logging
import sys
import time
import torch.distributed as dist
from .dist_util import master_only
class MessageLogger:
"""Message logger for printing.
Args:
opt (dict): Config. It contains the following keys:
name (str): Exp name.
logger (dict): Conta... | 5,208 | 30.005952 | 79 | py |
CCKS2022-track2-solution | CCKS2022-track2-solution-master/src/utils/dist_util.py | import functools
import os
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
def init_dist():
if mp.get_start_method(allow_none=True) is None:
mp.set_start_method("spawn")
rank = int(os.environ["RANK"])
num_gpus = torch.cuda.device_count()
torch.cuda.set_device(... | 581 | 21.384615 | 52 | py |
CCKS2022-track2-solution | CCKS2022-track2-solution-master/src/data/pair_dataset.py | import os
import numpy as np
import torch
from torchvision.io import read_image
from transformers import BertTokenizer
from .util import clean_str, read_info, read_pair, str2dict
class PairDataset(torch.utils.data.Dataset):
def __init__(self, opt):
super().__init__()
self.opt = opt
self.... | 3,013 | 30.395833 | 86 | py |
CCKS2022-track2-solution | CCKS2022-track2-solution-master/src/data/__init__.py | import importlib
import math
import random
from functools import partial
from os import path as osp
import numpy as np
import torch
import torch.distributed as dist
import torch.utils.data
from torch.utils.data.distributed import DistributedSampler
from src.utils import get_root_logger, scandir
__all__ = ["create_da... | 3,587 | 30.473684 | 87 | py |
CCKS2022-track2-solution | CCKS2022-track2-solution-master/src/archs/vl_arch.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import BertModel, SwinModel
class VLArch(nn.Module):
def __init__(self, opt):
super().__init__()
self.txt_encoder1 = BertModel.from_pretrained(opt["bert"])
self.txt_encoder1.pooler = nn.Identity()
... | 1,464 | 34.731707 | 87 | py |
CCKS2022-track2-solution | CCKS2022-track2-solution-master/test/test_dataloader.py | import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from src.data import create_dataset
from src.utils import parse
opt = parse("options/63_grad_clip_norm_0.5.yml")
for phase in opt["datasets"]:
opt["datasets"][phase]["phase"] = phase
dataset_opt = opt["datasets"]["train"]
train_set = crea... | 1,381 | 30.409091 | 71 | py |
DeepMoon | DeepMoon-master/model_train.py | #!/usr/bin/env python
"""Convolutional Neural Network Training Functions
Functions for building and training a (UNET) Convolutional Neural Network on
images of the Moon and binary ring targets.
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import pandas as pd
import h5py
fro... | 16,905 | 38.316279 | 91 | py |
DeepMoon | DeepMoon-master/get_unique_craters.py | #!/usr/bin/env python
"""Unique Crater Distribution Functions
Functions for extracting craters from model target predictions and filtering
out duplicates.
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import h5py
import sys
import utils.template_match_target as tmt
import uti... | 7,016 | 33.229268 | 79 | py |
DeepMoon | DeepMoon-master/tests/test_model_train.py | from __future__ import absolute_import, division, print_function
import numpy as np
from keras import backend as K
import sys
sys.path.append('../')
import model_train as mt
class TestModelTrain():
"""Tests model building."""
def test_build_model(self):
dim = 256
FL = 3
learn_rate = 0... | 1,075 | 32.625 | 110 | py |
sequential-knowledge-transformer | sequential-knowledge-transformer-master/inference.py | import os
import math
from pprint import PrettyPrinter
import random
import numpy as np
import torch # Torch must be imported before sklearn and tf
import sklearn
import tensorflow as tf
import better_exceptions
from tqdm import tqdm, trange
import colorlog
import colorful
from utils.etc_utils import set_logger, set... | 6,891 | 39.541176 | 93 | py |
sequential-knowledge-transformer | sequential-knowledge-transformer-master/interactive.py | import os
import math
from pprint import PrettyPrinter
import random
import numpy as np
import torch
import sklearn
import tensorflow as tf
import better_exceptions
from tqdm import tqdm, trange
import colorlog
import colorful
from utils.etc_utils import set_logger, set_tcmalloc, set_gpus, check_none_gradients
from u... | 4,766 | 33.543478 | 84 | py |
sequential-knowledge-transformer | sequential-knowledge-transformer-master/train.py | import os
import math
from pprint import PrettyPrinter
import random
import numpy as np
import torch # Torch must be imported before sklearn and tf
import sklearn
import tensorflow as tf
import better_exceptions
from tqdm import tqdm, trange
import colorlog
import colorful
from utils.etc_utils import set_logger, set... | 8,038 | 42.928962 | 111 | py |
sequential-knowledge-transformer | sequential-knowledge-transformer-master/official/bert/optimization.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applica... | 7,206 | 37.540107 | 80 | py |
sequential-knowledge-transformer | sequential-knowledge-transformer-master/official/bert/run_squad.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applica... | 12,676 | 36.729167 | 80 | py |
sequential-knowledge-transformer | sequential-knowledge-transformer-master/official/bert/run_classifier.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applica... | 8,913 | 35.089069 | 80 | py |
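
The schema in the header lends itself to simple tabular queries. Below is a minimal sketch, assuming the rows above have been exported to a local Parquet file named `code_files.parquet` (a hypothetical path) with the same column names; it uses plain pandas rather than any loader specific to this dataset.

```python
import pandas as pd

# Hypothetical local export of the table above; the path is an assumption.
df = pd.read_parquet("code_files.parquet")

# extension_type has a single class ("py"), so filtering is more useful
# on the length statistics.
wide = df[df["max_line_length"] > 100]
print(wide[["repo", "file", "max_line_length"]].head())

# Mean file length per repository, largest first.
print(df.groupby("repo")["file_length"].mean().sort_values(ascending=False))
```

Any dataframe with these columns works the same way, e.g. one obtained from a `datasets.Dataset` via `to_pandas()`.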