| repo (string, len 1–99) | file (string, len 13–215) | code (string, len 12–59.2M) | file_length (int64, 12–59.2M) | avg_line_length (float64, 3.82–1.48M) | max_line_length (int64, 12–2.51M) | extension_type (string, 1 class) |
|---|---|---|---|---|---|---|
steer | steer-master/latent_ode/lib/latent_ode.py | ###########################
# Latent ODEs for Irregularly-Sampled Time Series
# Author: Yulia Rubanova
###########################
import numpy as np
import sklearn as sk
#import gc
import torch
import torch.nn as nn
from torch.nn.functional import relu
import lib.utils as utils
from lib.utils impo... | 4,826 | 33.478571 | 99 | py |
steer | steer-master/latent_ode/lib/likelihood_eval.py | ###########################
# Latent ODEs for Irregularly-Sampled Time Series
# Author: Yulia Rubanova
###########################
import gc
import numpy as np
import sklearn as sk
#import gc
import torch
import torch.nn as nn
from torch.nn.functional import relu
import lib.utils as utils
from lib.... | 9,166 | 33.592453 | 114 | py |
steer | steer-master/latent_ode/lib/utils.py | ###########################
# Latent ODEs for Irregularly-Sampled Time Series
# Author: Yulia Rubanova
###########################
import os
import logging
import pickle
import torch
import torch.nn as nn
import numpy as np
import pandas as pd
import math
import glob
import re
from shutil import copyfile
import skle... | 18,626 | 28.660828 | 149 | py |
steer | steer-master/latent_ode/lib/encoder_decoder.py | ###########################
# Latent ODEs for Irregularly-Sampled Time Series
# Author: Yulia Rubanova
###########################
import numpy as np
import torch
import torch.nn as nn
from torch.nn.functional import relu
import lib.utils as utils
from torch.distributions import Categorical, Normal
import lib.utils as... | 9,918 | 28.520833 | 130 | py |
steer | steer-master/latent_ode/lib/base_models.py | ###########################
# Latent ODEs for Irregularly-Sampled Time Series
# Author: Yulia Rubanova
###########################
import numpy as np
import torch
import torch.nn as nn
from torch.nn.functional import relu
import lib.utils as utils
from lib.encoder_decoder import *
from lib.likelihood_eval import *
f... | 11,032 | 31.072674 | 112 | py |
steer | steer-master/latent_ode/lib/parse_datasets.py | ###########################
# Latent ODEs for Irregularly-Sampled Time Series
# Author: Yulia Rubanova
###########################
import os
import numpy as np
import torch
import torch.nn as nn
import lib.utils as utils
from lib.diffeq_solver import DiffeqSolver
from generate_timeseries import Periodic_1d
from torc... | 9,406 | 37.871901 | 135 | py |
steer | steer-master/stiff_ode_experiments/stiff_ode_demo.py | import os
import argparse
import time
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
parser = argparse.ArgumentParser('ODE demo')
parser.add_argument('--method', type=str, choices=['dopri5', 'adams'], default='dopri5')
parser.add_argument('--data_size', type=int, d... | 5,987 | 32.452514 | 161 | py |
steer | steer-master/torchdiffeq/setup.py | import setuptools
setuptools.setup(
name="torchdiffeq",
version="0.0.1",
author="Ricky Tian Qi Chen",
author_email="rtqichen@cs.toronto.edu",
description="ODE solvers and adjoint sensitivity analysis in PyTorch.",
url="https://github.com/arnabgho/torchdiffeq",
packages=['torchdiffeq', 'torc... | 443 | 30.714286 | 75 | py |
steer | steer-master/torchdiffeq/tests/gradient_tests.py | import unittest
import torch
import torchdiffeq
from problems import construct_problem
eps = 1e-12
torch.set_default_dtype(torch.float64)
TEST_DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def max_abs(tensor):
return torch.max(torch.abs(tensor))
class TestGradient(unittest.TestCase)... | 5,019 | 33.14966 | 96 | py |
steer | steer-master/torchdiffeq/tests/api_tests.py | import unittest
import torch
import torchdiffeq
from problems import construct_problem
eps = 1e-12
torch.set_default_dtype(torch.float64)
TEST_DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def max_abs(tensor):
return torch.max(torch.abs(tensor))
class TestCollectionState(unittest.Te... | 2,805 | 32.011765 | 114 | py |
steer | steer-master/torchdiffeq/tests/odeint_tests.py | import unittest
import torch
import torchdiffeq
import problems
error_tol = 1e-4
torch.set_default_dtype(torch.float64)
TEST_DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def max_abs(tensor):
return torch.max(torch.abs(tensor))
def rel_error(true, estimate):
return max_abs((true... | 6,526 | 35.875706 | 88 | py |
steer | steer-master/torchdiffeq/tests/problems.py | import math
import numpy as np
import scipy.linalg
import torch
class ConstantODE(torch.nn.Module):
def __init__(self, device):
super(ConstantODE, self).__init__()
self.a = torch.nn.Parameter(torch.tensor(0.2).to(device))
self.b = torch.nn.Parameter(torch.tensor(3.0).to(device))
def ... | 2,533 | 28.126437 | 104 | py |
steer | steer-master/torchdiffeq/tests/DETEST/run.py | import time
import numpy as np
from scipy.stats.mstats import gmean
import torch
from torchdiffeq import odeint
import detest
torch.set_default_tensor_type(torch.DoubleTensor)
class NFEDiffEq:
def __init__(self, diffeq):
self.diffeq = diffeq
self.nfe = 0
def __call__(self, t, y):
se... | 1,843 | 29.733333 | 119 | py |
steer | steer-master/torchdiffeq/tests/DETEST/detest.py | import math
import torch
####################################
# Problem Class A. Single equations.
####################################
def A1():
diffeq = lambda t, y: -y
init = lambda: (torch.tensor(0.), torch.tensor(1.))
solution = lambda t: torch.exp(-t)
return diffeq, init, solution
def A2():
... | 7,740 | 22.107463 | 119 | py |
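Since each DETEST problem returns its exact solution alongside the ODE, a solver can be validated end-to-end. Below is a minimal, illustrative check of problem A1 (y' = -y, y(0) = 1), assuming torchdiffeq's public `odeint`; it is a sketch, not part of the dataset row.

```python
import torch
from torchdiffeq import odeint

def A1():
    diffeq = lambda t, y: -y
    init = lambda: (torch.tensor(0.), torch.tensor(1.))
    solution = lambda t: torch.exp(-t)
    return diffeq, init, solution

diffeq, init, solution = A1()
t0, y0 = init()
t = torch.linspace(float(t0), 5.0, 50)
ys = odeint(diffeq, y0, t)                    # numerical trajectory
err = (ys - solution(t)).abs().max().item()   # compare against exp(-t)
print(f"max abs error: {err:.2e}")
```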
steer | steer-master/torchdiffeq/torchdiffeq/_impl/odeint_stochastic_end_normal.py | from .tsit5 import Tsit5Solver
from .dopri5 import Dopri5Solver
from .bosh3 import Bosh3Solver
from .adaptive_heun import AdaptiveHeunSolver
from .fixed_grid import Euler, Midpoint, RK4
from .fixed_adams import AdamsBashforth, AdamsBashforthMoulton
from .adams import VariableCoefficientAdamsBashforth
from .misc import ... | 7,776 | 35.511737 | 175 | py |
steer | steer-master/torchdiffeq/torchdiffeq/_impl/odeint_adjoint_stochastic_end_normal.py | from .tsit5 import Tsit5Solver
from .dopri5 import Dopri5Solver
from .bosh3 import Bosh3Solver
from .adaptive_heun import AdaptiveHeunSolver
from .fixed_grid import Euler, Midpoint, RK4
from .fixed_adams import AdamsBashforth, AdamsBashforthMoulton
from .adams import VariableCoefficientAdamsBashforth
from .misc import ... | 4,034 | 35.351351 | 181 | py |
steer | steer-master/torchdiffeq/torchdiffeq/_impl/adjoint.py | import torch
import torch.nn as nn
from . import odeint
from .misc import _flatten, _flatten_convert_none_to_zeros
class OdeintAdjointMethod(torch.autograd.Function):
@staticmethod
def forward(ctx, *args):
assert len(args) >= 8, 'Internal error: all arguments required.'
y0, func, t, flat_para... | 5,471 | 39.835821 | 111 | py |
steer | steer-master/torchdiffeq/torchdiffeq/_impl/odeint_skip_step.py | from .tsit5 import Tsit5Solver
from .dopri5 import Dopri5Solver
from .bosh3 import Bosh3Solver
from .adaptive_heun import AdaptiveHeunSolver
from .fixed_grid import Euler, Midpoint, RK4
from .fixed_adams import AdamsBashforth, AdamsBashforthMoulton
from .adams import VariableCoefficientAdamsBashforth
from .misc import ... | 10,205 | 37.659091 | 132 | py |
steer | steer-master/torchdiffeq/torchdiffeq/_impl/odeint_stochastic_end_v2_inference.py | from .tsit5 import Tsit5Solver
from .dopri5 import Dopri5Solver
from .bosh3 import Bosh3Solver
from .adaptive_heun import AdaptiveHeunSolver
from .fixed_grid import Euler, Midpoint, RK4
from .fixed_adams import AdamsBashforth, AdamsBashforthMoulton
from .adams import VariableCoefficientAdamsBashforth
from .misc import ... | 7,970 | 34.744395 | 185 | py |
steer | steer-master/torchdiffeq/torchdiffeq/_impl/odeint_adjoint_stochastic_end_v3.py | from .tsit5 import Tsit5Solver
from .dopri5 import Dopri5Solver
from .bosh3 import Bosh3Solver
from .adaptive_heun import AdaptiveHeunSolver
from .fixed_grid import Euler, Midpoint, RK4
from .fixed_adams import AdamsBashforth, AdamsBashforthMoulton
from .adams import VariableCoefficientAdamsBashforth
from .misc import ... | 4,017 | 35.198198 | 184 | py |
steer | steer-master/torchdiffeq/torchdiffeq/_impl/odeint_stochastic_end.py | from .tsit5 import Tsit5Solver
from .dopri5 import Dopri5Solver
from .bosh3 import Bosh3Solver
from .adaptive_heun import AdaptiveHeunSolver
from .fixed_grid import Euler, Midpoint, RK4
from .fixed_adams import AdamsBashforth, AdamsBashforthMoulton
from .adams import VariableCoefficientAdamsBashforth
from .misc import ... | 7,165 | 35.01005 | 137 | py |
steer | steer-master/torchdiffeq/torchdiffeq/_impl/adaptive_heun.py | # Based on https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/integrate
import torch
from .misc import (
_scaled_dot_product, _convert_to_tensor, _is_finite, _select_initial_step, _handle_unused_kwargs, _is_iterable,
_optimal_step_size, _compute_error_ratio
)
from .solvers import AdaptiveSt... | 4,839 | 42.214286 | 118 | py |
steer | steer-master/torchdiffeq/torchdiffeq/_impl/odeint_stochastic_end_v3.py | from .tsit5 import Tsit5Solver
from .dopri5 import Dopri5Solver
from .bosh3 import Bosh3Solver
from .adaptive_heun import AdaptiveHeunSolver
from .fixed_grid import Euler, Midpoint, RK4
from .fixed_adams import AdamsBashforth, AdamsBashforthMoulton
from .adams import VariableCoefficientAdamsBashforth
from .misc import ... | 7,805 | 35.647887 | 176 | py |
steer | steer-master/torchdiffeq/torchdiffeq/_impl/bosh3.py | import torch
from .misc import (
_scaled_dot_product, _convert_to_tensor, _is_finite, _select_initial_step, _handle_unused_kwargs, _is_iterable,
_optimal_step_size, _compute_error_ratio
)
from .solvers import AdaptiveStepsizeODESolver
from .interp import _interp_fit, _interp_evaluate
from .rk_common import _Run... | 4,552 | 44.989899 | 118 | py |
steer | steer-master/torchdiffeq/torchdiffeq/_impl/misc.py | import warnings
import torch
def _flatten(sequence):
flat = [p.contiguous().view(-1) for p in sequence]
return torch.cat(flat) if len(flat) > 0 else torch.tensor([])
def _flatten_convert_none_to_zeros(sequence, like_sequence):
flat = [
p.contiguous().view(-1) if p is not None else torch.zeros_li... | 6,621 | 32.785714 | 119 | py |
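The `_flatten` helper shown above packs a heterogeneous list of tensors into one contiguous vector, which is how the adjoint machinery passes all model parameters through autograd at once. A tiny usage illustration with made-up tensors:

```python
import torch

params = [torch.randn(2, 3), torch.randn(4), torch.randn(5, 1)]
flat = torch.cat([p.contiguous().view(-1) for p in params])  # same op as _flatten
assert flat.shape == (15,)  # 6 + 4 + 5 elements in one 1-D tensor
```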
steer | steer-master/torchdiffeq/torchdiffeq/_impl/odeint_adjoint_stochastic_end.py | from .tsit5 import Tsit5Solver
from .dopri5 import Dopri5Solver
from .bosh3 import Bosh3Solver
from .adaptive_heun import AdaptiveHeunSolver
from .fixed_grid import Euler, Midpoint, RK4
from .fixed_adams import AdamsBashforth, AdamsBashforthMoulton
from .adams import VariableCoefficientAdamsBashforth
from .misc import ... | 3,574 | 38.722222 | 146 | py |
steer | steer-master/torchdiffeq/torchdiffeq/_impl/interp.py | import torch
from .misc import _convert_to_tensor, _dot_product
def _interp_fit(y0, y1, y_mid, f0, f1, dt):
"""Fit coefficients for 4th order polynomial interpolation.
Args:
y0: function value at the start of the interval.
y1: function value at the end of the interval.
y_mid: function... | 2,501 | 36.909091 | 110 | py |
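The docstring's five constraints (endpoint values y0 and y1, midpoint value y_mid, endpoint slopes f0 and f1) determine a unique degree-4 polynomial. The following sketch solves that 5×5 system directly for intuition; it is illustrative and does not reproduce the library's closed-form coefficients.

```python
import numpy as np

def fit_quartic(y0, y1, y_mid, f0, f1, dt):
    # Rows encode: p(0)=y0, p(dt)=y1, p(dt/2)=y_mid, p'(0)=f0, p'(dt)=f1
    A = np.array([
        [1, 0,    0,          0,          0],
        [1, dt,   dt**2,      dt**3,      dt**4],
        [1, dt/2, (dt/2)**2,  (dt/2)**3,  (dt/2)**4],
        [0, 1,    0,          0,          0],
        [0, 1,    2*dt,       3*dt**2,    4*dt**3],
    ])
    return np.linalg.solve(A, np.array([y0, y1, y_mid, f0, f1]))

dt = 0.1  # sanity check against y = sin(t) over one small step
c = fit_quartic(np.sin(0), np.sin(dt), np.sin(dt/2), np.cos(0), np.cos(dt), dt)
print(np.polyval(c[::-1], dt/4) - np.sin(dt/4))  # ~0: the quartic matches sin locally
```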
steer | steer-master/torchdiffeq/torchdiffeq/_impl/tsit5.py | import torch
from .misc import _scaled_dot_product, _convert_to_tensor, _is_finite, _select_initial_step, _handle_unused_kwargs
from .solvers import AdaptiveStepsizeODESolver
from .rk_common import _RungeKuttaState, _ButcherTableau, _runge_kutta_step
# Parameters from Tsitouras (2011).
_TSITOURAS_TABLEAU = _ButcherTab... | 6,777 | 47.414286 | 120 | py |
steer | steer-master/torchdiffeq/torchdiffeq/_impl/odeint_adjoint_skip_step.py | from .tsit5 import Tsit5Solver
from .dopri5 import Dopri5Solver
from .bosh3 import Bosh3Solver
from .adaptive_heun import AdaptiveHeunSolver
from .fixed_grid import Euler, Midpoint, RK4
from .fixed_adams import AdamsBashforth, AdamsBashforthMoulton
from .adams import VariableCoefficientAdamsBashforth
from .misc import ... | 3,667 | 38.021277 | 140 | py |
steer | steer-master/torchdiffeq/torchdiffeq/_impl/adams.py | import collections
import torch
from .solvers import AdaptiveStepsizeODESolver
from .misc import (
_handle_unused_kwargs, _select_initial_step, _convert_to_tensor, _scaled_dot_product, _is_iterable,
_optimal_step_size, _compute_error_ratio
)
_MIN_ORDER = 1
_MAX_ORDER = 12
gamma_star = [
1, -1 / 2, -1 / 12... | 7,148 | 39.851429 | 128 | py |
steer | steer-master/torchdiffeq/torchdiffeq/_impl/odeint_adjoint_stochastic_end_v2.py | from .tsit5 import Tsit5Solver
from .dopri5 import Dopri5Solver
from .bosh3 import Bosh3Solver
from .adaptive_heun import AdaptiveHeunSolver
from .fixed_grid import Euler, Midpoint, RK4
from .fixed_adams import AdamsBashforth, AdamsBashforthMoulton
from .adams import VariableCoefficientAdamsBashforth
from .misc import ... | 4,062 | 35.276786 | 184 | py |
steer | steer-master/torchdiffeq/torchdiffeq/_impl/solvers.py | import abc
import torch
from .misc import _assert_increasing, _handle_unused_kwargs
class AdaptiveStepsizeODESolver(object):
__metaclass__ = abc.ABCMeta
def __init__(self, func, y0, atol, rtol, **unused_kwargs):
_handle_unused_kwargs(self, unused_kwargs)
del unused_kwargs
self.func =... | 3,276 | 29.06422 | 89 | py |
steer | steer-master/torchdiffeq/torchdiffeq/_impl/odeint_stochastic_end_v2.py | from .tsit5 import Tsit5Solver
from .dopri5 import Dopri5Solver
from .bosh3 import Bosh3Solver
from .adaptive_heun import AdaptiveHeunSolver
from .fixed_grid import Euler, Midpoint, RK4
from .fixed_adams import AdamsBashforth, AdamsBashforthMoulton
from .adams import VariableCoefficientAdamsBashforth
from .misc import ... | 7,458 | 35.925743 | 175 | py |
steer | steer-master/torchdiffeq/torchdiffeq/_impl/dopri5.py | # Based on https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/integrate
import torch
from .misc import (
_scaled_dot_product, _convert_to_tensor, _is_finite, _select_initial_step, _handle_unused_kwargs, _is_iterable,
_optimal_step_size, _compute_error_ratio
)
from .solvers import AdaptiveSt... | 5,566 | 44.260163 | 118 | py |
FragmentVC | FragmentVC-main/convert_batch.py | #!/usr/bin/env python3
"""Convert multiple pairs."""
import warnings
from pathlib import Path
from functools import partial
from multiprocessing import Pool, cpu_count
import yaml
import torch
import numpy as np
import soundfile as sf
from jsonargparse import ArgumentParser, ActionConfigFile
from data import load_wa... | 3,966 | 29.05303 | 87 | py |
FragmentVC | FragmentVC-main/convert.py | #!/usr/bin/env python3
"""Convert using one source utterance and multiple target utterances."""
import warnings
from datetime import datetime
from pathlib import Path
from copy import deepcopy
import torch
import numpy as np
import soundfile as sf
from jsonargparse import ArgumentParser, ActionConfigFile
import sox
... | 4,829 | 32.776224 | 88 | py |
FragmentVC | FragmentVC-main/train.py | #!/usr/bin/env python3
"""Train FragmentVC model."""
import argparse
import datetime
import random
from pathlib import Path
import torch
import torch.nn as nn
from torch.optim import AdamW
from torch.utils.data import DataLoader, random_split
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
fr... | 7,874 | 30.754032 | 88 | py |
FragmentVC | FragmentVC-main/preprocess.py | #!/usr/bin/env python3
"""Precompute Wav2Vec features."""
import os
import json
from pathlib import Path
from tempfile import mkstemp
from multiprocessing import cpu_count
import tqdm
import torch
from torch.utils.data import DataLoader
from jsonargparse import ArgumentParser, ActionConfigFile
from models import loa... | 3,318 | 25.766129 | 85 | py |
FragmentVC | FragmentVC-main/models/utils.py | """Useful utilities."""
import math
import torch
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from fairseq.models.wav2vec import Wav2Vec2Model
def load_pretrained_wav2vec(ckpt_path):
"""Load pretrained Wav2Vec model."""
ckpt = torch.load(ckpt_path)
model = Wav2Vec2Mod... | 2,140 | 33.532258 | 116 | py |
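The loader above is truncated mid-call. A hedged guess at how such a function typically continues, based on fairseq's public `Wav2Vec2Model` interface (`build_model` plus a state-dict load); treat the exact calls as assumptions rather than the repository's verified code.

```python
import torch
from fairseq.models.wav2vec import Wav2Vec2Model

def load_pretrained_wav2vec_sketch(ckpt_path):
    """Assumed reconstruction: build the model from checkpoint args, then load weights."""
    ckpt = torch.load(ckpt_path)
    model = Wav2Vec2Model.build_model(ckpt["args"], task=None)
    model.load_state_dict(ckpt["model"])
    return model.eval()
```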
FragmentVC | FragmentVC-main/models/model.py | """FragmentVC model architecture."""
from typing import Tuple, List, Optional
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from .convolutional_transformer import Smoother, Extractor
class FragmentVC(nn.Module):
"""
FragmentVC uses Wav2Vec feature of the source speaker to q... | 4,523 | 29.362416 | 88 | py |
FragmentVC | FragmentVC-main/models/convolutional_transformer.py | """Convolutional transformer"""
from typing import Optional, Tuple
import torch.nn.functional as F
from torch import Tensor
from torch.nn import Module, Dropout, LayerNorm, Conv1d, MultiheadAttention
class Smoother(Module):
"""Convolutional Transformer Encoder Layer"""
def __init__(self, d_model: int, nhe... | 3,526 | 28.889831 | 84 | py |
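As a rough picture of what a "convolutional transformer" encoder layer such as `Smoother` looks like: self-attention followed by a 1-D convolutional feed-forward over time instead of the usual position-wise MLP. The sketch below is self-contained and illustrative; layer sizes and wiring are assumptions, not the repository's exact architecture.

```python
import torch
from torch import nn

class ConvTransformerLayer(nn.Module):
    def __init__(self, d_model=256, nhead=4, d_hid=512):
        super().__init__()
        self.attn = nn.MultiheadAttention(d_model, nhead)
        self.conv1 = nn.Conv1d(d_model, d_hid, kernel_size=3, padding=1)
        self.conv2 = nn.Conv1d(d_hid, d_model, kernel_size=3, padding=1)
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)

    def forward(self, x):                     # x: (time, batch, d_model)
        x = self.norm1(x + self.attn(x, x, x)[0])
        y = x.permute(1, 2, 0)                # (batch, d_model, time) for Conv1d
        y = self.conv2(torch.relu(self.conv1(y)))
        return self.norm2(x + y.permute(2, 0, 1))
```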
FragmentVC | FragmentVC-main/data/intra_speaker_dataset.py | """Dataset for reconstruction scheme."""
import json
import random
from pathlib import Path
from concurrent.futures import ThreadPoolExecutor
import torch
from tqdm import tqdm
from torch.utils.data import Dataset
from torch.nn.utils.rnn import pad_sequence
class IntraSpeakerDataset(Dataset):
"""Dataset for rec... | 4,148 | 31.928571 | 84 | py |
FragmentVC | FragmentVC-main/data/preprocess_dataset.py | """Precompute Wav2Vec features and spectrograms."""
from copy import deepcopy
from pathlib import Path
import torch
from librosa.util import find_files
import sox
from .utils import load_wav, log_mel_spectrogram
class PreprocessDataset(torch.utils.data.Dataset):
"""Prefetch audio data for preprocessing."""
... | 2,354 | 26.068966 | 87 | py |
visualbert | visualbert-master/unsupervised_visualbert/src/utils.py | # coding=utf-8
# Copyleft 2019 Project LXRT
import sys
import csv
import base64
import time
import torch
import numpy as np
from tqdm import tqdm
csv.field_size_limit(sys.maxsize)
FIELDNAMES = ["img_id", "img_h", "img_w", "objects_id", "objects_conf",
"attrs_id", "attrs_conf", "num_boxes", "boxes", "fea... | 9,752 | 38.646341 | 138 | py |
visualbert | visualbert-master/unsupervised_visualbert/src/param.py | # coding=utf-8
# Copyleft 2019 project LXRT.
import argparse
import random
import numpy as np
import torch
import logging
logging.basicConfig(level=logging.INFO)
def get_optimizer(optim):
# Bind the optimizer
if optim == 'rms':
print("Optimizer: Using RMSProp")
optimizer = torch.optim.RMSpro... | 6,424 | 38.906832 | 117 | py |
visualbert | visualbert-master/unsupervised_visualbert/src/tools/convert_tsv_to_h5.py | import sys
import csv
import base64
import time
import torch
import numpy as np
from src.utils import load_obj_tsv_save_to_h5
load_obj_tsv_save_to_h5(
"data/mscoco_imgfeat/train2014_obj36.tsv",
"data/mscoco_imgfeat/train2014_obj36.h5",
"data/mscoco_imgfeat/train2014_obj36.json",
82783
)
load_obj_t... | 1,165 | 20.592593 | 47 | py |
visualbert | visualbert-master/unsupervised_visualbert/src/pretrain/box.py | import torch
import numpy
import numpy as np
def heuristic_filter(box_a, box_b, image_size, threshhold = 0.15):
# center_mass
box_a_x_center = (box_a[0] + box_a[2]) / 2
box_b_x_center = (box_b[0] + box_b[2]) / 2
box_a_y_center = (box_a[1] + box_a[3]) / 2
box_b_y_center = (box_b[1] + box_b[3]) / 2
... | 7,027 | 40.099415 | 135 | py |
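The heuristic above is cut off after computing the box centers. One plausible way such a filter finishes (assumed, not recovered from the truncated source) is to threshold the image-normalized distance between centers:

```python
def center_distance_ok(box_a, box_b, image_size, threshold=0.15):
    """Hypothetical completion: accept a box pair whose normalized
    center-of-mass distance is within the threshold."""
    ax, ay = (box_a[0] + box_a[2]) / 2, (box_a[1] + box_a[3]) / 2
    bx, by = (box_b[0] + box_b[2]) / 2, (box_b[1] + box_b[3]) / 2
    w, h = image_size
    dist = (((ax - bx) / w) ** 2 + ((ay - by) / h) ** 2) ** 0.5
    return dist <= threshold

print(center_distance_ok([0, 0, 10, 10], [2, 2, 12, 12], (100, 100)))  # True
```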
visualbert | visualbert-master/unsupervised_visualbert/src/pretrain/tag_data_utilis.py | import numpy as np
import torch.nn as nn
from param import args
from lxrt.entry import LXRTEncoder
from lxrt.modeling import BertLayerNorm, GeLU
from lxrt.tokenization import BertTokenizer
import torch
from collections import defaultdict
import numpy
import random
'''
Given that tags will be extensi... | 8,378 | 44.291892 | 152 | py |
visualbert | visualbert-master/unsupervised_visualbert/src/pretrain/text_data.py | import random
from torch.utils.data import Dataset
from lxrt.tokenization import BertTokenizer
import logging
from lxmert_data import InputExample
import json
from param import args
from lxmert_data import InputFeatures, random_word
import os
from src.tools import sharearray
import gc
from tqdm import tqdm
import numpy... | 18,260 | 38.270968 | 182 | py |
visualbert | visualbert-master/unsupervised_visualbert/src/pretrain/qa_answer_table.py | # coding=utf-8
# Copyleft 2019 project LXRT.
import json
import torch
class AnswerTable:
ANS_CONVERT = {
"a man": "man",
"the man": "man",
"a woman": "woman",
"the woman": "woman",
'one': '1',
'two': '2',
'three': '3',
'four': '4',
'five': '... | 13,691 | 34.842932 | 147 | py |
visualbert | visualbert-master/unsupervised_visualbert/src/pretrain/lxmert_data.py | # coding=utf-8
# Copyleft 2019 project LXRT.
from collections import defaultdict
import json
import random
import numpy as np
from torch.utils.data import Dataset
import torch
from param import args
from src.pretrain.qa_answer_table import AnswerTable
from src.utils import load_obj_tsv
from copy import deepcopy
impor... | 42,230 | 43.453684 | 630 | py |
visualbert | visualbert-master/unsupervised_visualbert/src/pretrain/lxmert_pretrain.py | # coding=utf-8
# Copyleft 2019 project LXRT.
import collections
import os
import random
from tqdm import tqdm
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import json
from param import args
from pretrain.lxmert_data import LXMERTDataset, LXMERTTorchDataset, LXMERTEvalu... | 21,642 | 41.189084 | 320 | py |
visualbert | visualbert-master/unsupervised_visualbert/src/lxrt/optimization.py | # coding=utf-8
# Copyright 2019 project LXRT
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http:/... | 8,058 | 42.798913 | 141 | py |
visualbert | visualbert-master/unsupervised_visualbert/src/lxrt/entry.py | # coding=utf-8
# Copyright 2021 Project Unsupervised VisualBERT
# Copyright 2019 project LXRT.
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may... | 11,480 | 37.016556 | 125 | py |
visualbert | visualbert-master/unsupervised_visualbert/src/lxrt/modeling.py | # coding=utf-8
# Copyright 2019 project LXRT.
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the ... | 69,048 | 45.124916 | 308 | py |
visualbert | visualbert-master/unsupervised_visualbert/src/lxrt/h5_data.py | import h5py
from copy import deepcopy
import numpy as np
import json
from torch.utils.data import Dataset
import torch
import random
from param import args
from tqdm import tqdm
from torch.utils.data import DataLoader
import gc
from src.tools import sharearray
import os
def chunks(l... | 17,660 | 44.518041 | 225 | py |
visualbert | visualbert-master/unsupervised_visualbert/src/lxrt/file_utils.py | """
Utilities for working with the local dataset cache.
This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp
Copyright by the AllenNLP authors.
"""
import json
import logging
import os
import shutil
import tempfile
from functools import wraps
from hashlib import sha256
import sys
from i... | 8,209 | 32.104839 | 112 | py |
visualbert | visualbert-master/unsupervised_visualbert/src/tasks/vqa_data.py | # coding=utf-8
# Copyleft 2019 project LXRT.
import json
import os
import pickle
import numpy as np
import torch
from torch.utils.data import Dataset
import h5py
from copy import deepcopy
from param import args
from utils import load_obj_tsv
from pretrain.tag_data_utilis import create_tags
from lxrt.tokenization imp... | 10,280 | 34.329897 | 221 | py |
visualbert | visualbert-master/unsupervised_visualbert/src/tasks/vqa_model.py | # coding=utf-8
# Copyleft 2019 project LXRT.
import torch.nn as nn
from param import args
from lxrt.entry import LXRTEncoder, convert_sents_to_features_tensors, convert_tags_to_tensorts, pad_np_arrays
from lxrt.modeling import BertLayerNorm, GeLU
from lxrt.tokenization import BertTokenizer
import numpy as np
# Max l... | 2,612 | 34.310811 | 192 | py |
visualbert | visualbert-master/unsupervised_visualbert/src/tasks/vqa.py | # coding=utf-8
# Copyleft 2019 project LXRT.
import os
import collections
import torch
import torch.nn as nn
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import h5py
import pandas as pd
from param import args
from pretrain.qa_answer_table import load_lxmert_qa, load_lxmert_from_sgg_and_lx... | 8,707 | 36.86087 | 125 | py |
visualbert | visualbert-master/unsupervised_visualbert/data/vg_gqa_imgfeat/extract_gqa_image.py | #!/usr/bin/env python
# The root of bottom-up-attention repo. Do not need to change if using provided docker file.
BUTD_ROOT = '/opt/butd/'
import os, sys
sys.path.insert(0, BUTD_ROOT + "/tools")
os.environ['GLOG_minloglevel'] = '2'
import _init_paths
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list
f... | 6,511 | 35.58427 | 113 | py |
visualbert | visualbert-master/unsupervised_visualbert/data/nlvr2_imgfeat/extract_nlvr2_image.py | #!/usr/bin/env python
# The root of bottom-up-attention repo. Do not need to change if using provided docker file.
BUTD_ROOT = '/opt/butd/'
# SPLIT to its folder name under IMG_ROOT
SPLIT2DIR = {
'train': 'train',
'valid': 'dev',
'test': 'test1',
'hidden': 'test2', # Please correct w... | 7,358 | 35.430693 | 113 | py |
visualbert | visualbert-master/unsupervised_visualbert/data/mscoco_imgfeat/extract_coco_image.py | #!/usr/bin/env python
# The root of bottom-up-attention repo. Do not need to change if using provided docker file.
BUTD_ROOT = '/opt/butd/'
# SPLIT to its folder name under IMG_ROOT
SPLIT2DIR = {
'train': 'train2014',
'valid': 'val2014',
'test': 'test2015',
}
import os, sys
sys.path.... | 6,810 | 35.42246 | 113 | py |
visualbert | visualbert-master/visualbert/models/model_wrapper.py | # Handles model training (optimizer), loading, saving
import argparse
import os
import shutil
from copy import deepcopy
import multiprocessing
import numpy as np
import pandas as pd
import torch
from allennlp.common.params import Params
from allennlp.training.learning_rate_schedulers import LearningRateScheduler
from... | 10,127 | 39.674699 | 134 | py |
visualbert | visualbert-master/visualbert/models/model.py | # Modified from VCR.
from typing import Dict, List, Any
import os
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.nn.parallel
from allennlp.data.vocabulary import Vocabulary
from allennlp.models.model import Model
from allennlp.modules import TextFieldEmbedder, Seq2SeqEncoder, FeedFor... | 14,578 | 42.912651 | 215 | py |
visualbert | visualbert-master/visualbert/models/train.py | """
Training script. Should be pretty adaptable to whatever.
"""
import argparse
import os
import shutil
from copy import deepcopy
import multiprocessing
import numpy as np
import pandas as pd
import torch
from allennlp.common.params import Params
from allennlp.training.learning_rate_schedulers import LearningRateSche... | 16,973 | 39.901205 | 166 | py |
visualbert | visualbert-master/visualbert/pytorch_pretrained_bert/optimization.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICEN... | 13,112 | 42.134868 | 139 | py |
visualbert | visualbert-master/visualbert/pytorch_pretrained_bert/__main__.py | # coding: utf8
def main():
import sys
try:
from .convert_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ModuleNotFoundError:
print("pytorch_pretrained_bert can only be used from the commandline to convert TensorFlow models in PyTorch, "
"In that case, i... | 932 | 39.565217 | 137 | py |
visualbert | visualbert-master/visualbert/pytorch_pretrained_bert/modeling.py | # coding=utf-8
# Modified by Harold. Added VisualBERT.
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance ... | 84,216 | 48.077506 | 259 | py |
visualbert | visualbert-master/visualbert/pytorch_pretrained_bert/fine_tuning.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a cop... | 27,941 | 42.187017 | 139 | py |
visualbert | visualbert-master/visualbert/pytorch_pretrained_bert/file_utils.py | """
Utilities for working with the local dataset cache.
This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp
Copyright by the AllenNLP authors.
"""
import os
import logging
import shutil
import tempfile
import json
from urllib.parse import urlparse
from pathlib import Path
from typing ... | 8,021 | 32.425 | 98 | py |
visualbert | visualbert-master/visualbert/dataloaders/vcr.py | # Modified from R2C
"""
Dataloaders for VCR
"""
import json
import pickle
import os
from collections import defaultdict
import numpy as np
import numpy
import torch
from allennlp.data.dataset import Batch
from allennlp.data.fields import TextField, ListField, LabelField, SequenceLabelField, ArrayField, MetadataField
fro... | 20,515 | 42.191579 | 168 | py |
visualbert | visualbert-master/visualbert/dataloaders/flickr_dataset.py | import os
from torch.utils.data import Dataset
import numpy as np
import random
import json
from collections import defaultdict
from tqdm import tqdm
import numpy
import torch
from allennlp.data.dataset import Batch
from allennlp.data.fields import TextField, ListField, Label... | 11,696 | 40.626335 | 158 | py |
visualbert | visualbert-master/visualbert/dataloaders/box_utils.py | import os
import random
import numpy as np
import scipy
import warnings
from torchvision.datasets.folder import default_loader
from torchvision.transforms import functional
USE_IMAGENET_PRETRAINED = True
##### Image
def load_image(img_fn):
"""Load the specified image and return a [H,W,3] Numpy array.
"""
... | 2,765 | 35.88 | 119 | py |
visualbert | visualbert-master/visualbert/dataloaders/nlvr_dataset.py | import os
from torch.utils.data import Dataset
import numpy as np
import random
import json
from collections import defaultdict
from tqdm import tqdm
import numpy
import torch
from allennlp.data.dataset import Batch
from allennlp.data.fields import TextField, ListField, Label... | 9,904 | 44.645161 | 196 | py |
visualbert | visualbert-master/visualbert/dataloaders/bert_field.py | from typing import Dict, List, Optional
import textwrap
from overrides import overrides
from spacy.tokens import Token as SpacyToken
import torch
from allennlp.common.checks import ConfigurationError
from allennlp.data.fields.sequence_field import SequenceField
from allennlp.data.tokenizers.token import Token
from al... | 8,295 | 40.273632 | 119 | py |
visualbert | visualbert-master/visualbert/dataloaders/vqa_dataset.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
from torch.utils.data import Dataset
import numpy as np
from copy import deepcopy
import torch
from torch.util... | 14,744 | 41.615607 | 184 | py |
visualbert | visualbert-master/visualbert/dataloaders/bert_data_utils.py | # Functions to convert raw strings into BERT input feature (InputFeatures' class method)
# Some functions for reading image features
# To take care of padding, we will use AllenNLP's Field;
# Caveat: we pad sequences with zero with one exception: BERT's pre-training language model objective mask's padding should be -1... | 21,319 | 39.378788 | 130 | py |
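The -1 caveat matters because the masked-LM loss must skip both padding and unmasked positions, which PyTorch expresses through ignore_index. A small illustration with made-up token ids:

```python
import torch

input_ids = torch.tensor([101, 2054, 2003, 102, 0, 0])  # token ids pad with 0
lm_labels = torch.tensor([-1, -1, 2003, -1, -1, -1])     # LM labels pad with -1
loss_fn = torch.nn.CrossEntropyLoss(ignore_index=-1)     # -1 positions contribute no loss
```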
visualbert | visualbert-master/visualbert/dataloaders/coco_dataset.py | import os
import random
import json
from collections import defaultdict
from tqdm import tqdm
import numpy as np
import numpy
import torch
from torch.utils.data import Dataset
from allennlp.data.dataset import Batch
from allennlp.data.fields import TextField, ListField, LabelField, SequenceLabelField, ArrayField, Meta... | 21,482 | 45.600868 | 205 | py |
visualbert | visualbert-master/visualbert/dataloaders/flickr_ban/utils.py | # Copied from https://github.com/jnhwkim/ban-vqa
"""
This code is extended from Hengyuan Hu's repository.
https://github.com/hengyuan-hu/bottom-up-attention-vqa
"""
from __future__ import print_function
import errno
import os
import re
import collections
import numpy as np
import operator
import functools
from PIL imp... | 9,306 | 29.817881 | 107 | py |
visualbert | visualbert-master/visualbert/dataloaders/flickr_ban/dataset.py | # Modified from https://github.com/jnhwkim/ban-vqa
"""
This code is modified from Hengyuan Hu's repository.
https://github.com/hengyuan-hu/bottom-up-attention-vqa
"""
from __future__ import print_function
import os
import json
import _pickle as cPickle
import pickle
import numpy as np
from visualbert.dataloaders.flick... | 23,011 | 35.8192 | 149 | py |
visualbert | visualbert-master/visualbert/utils/pytorch_misc.py | """
Question relevance model
"""
# Make stuff
import os
import re
import shutil
import time
import numpy as np
import pandas as pd
import torch
from allennlp.common.util import START_SYMBOL, END_SYMBOL
from allennlp.nn.util import device_mapping
from allennlp.training.trainer import move_optimizer_to_cuda
from torch.... | 15,975 | 38.156863 | 122 | py |
visualbert | visualbert-master/visualbert/utils/detector.py | """
ok so I lied. it's not a detector, it's the resnet backbone
"""
import torch
import torch.nn as nn
import torch.nn.parallel
from torchvision.models import resnet
from utils.pytorch_misc import Flattener
import torch.utils.model_zoo as model_zoo
#from config_vcr import USE_IMAGENET_PRETRAINED
from utils.pytorch_m... | 6,108 | 41.131034 | 139 | py |
visualbert | visualbert-master/visualbert/utils/get_image_features/get_mask.py | #!/usr/bin/env python2
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by ... | 12,823 | 31.383838 | 107 | py |
visualbert | visualbert-master/visualbert/utils/get_image_features/get_mask_utils.py | # Modified by Harold. Courtesy of the author of VCR
"""
Detect the images from a dataframe, saving masks to a json.
"""
from collections import defaultdict
import cv2 # NOQA (Must import before importing caffe2 due to bug in cv2)
import logging
import os
import time
from caffe2.python import workspace
from detectro... | 14,915 | 38.989276 | 268 | py |
visualbert | visualbert-master/visualbert/utils/get_image_features/extract_image_features_nlvr.py | # Modified by Harold
#!/usr/bin/env python2
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#... | 11,369 | 31.485714 | 107 | py |
skccm | skccm-master/docs/conf.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# skccm documentation build configuration file, created by
# sphinx-quickstart on Tue Jan 24 16:48:02 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# auto... | 5,151 | 28.953488 | 79 | py |
EcoSVM | EcoSVM-master/SIcode/python_scripts/MNIST/EcoSVM_MNIST.py | #Owen Howell, July 20, 2019
#olh20@bu.edu, https://owenhowell20.github.io
#This code runs Eco_SVM on the MNIST dataset
#Note: This code takes significant computational time (approx. 1+ days); for the plots made in the paper, each realization was run in parallel
#Note: The memory requirements are also large for the full dataset. For... | 15,622 | 22.671212 | 195 | py |
Meta-RL-Harlow | Meta-RL-Harlow-master/main_1d.py | import os
import yaml
import pickle
import argparse
import numpy as np
import torch as T
import torch.nn as nn
import torch.multiprocessing as mp
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from collections import namedtuple
from common.shared_optim import SharedAdam, SharedRMSprop
from ... | 3,414 | 30.915888 | 113 | py |
Meta-RL-Harlow | Meta-RL-Harlow-master/vis_simple.py | import os
import yaml
import pickle
import argparse
import numpy as np
import torch as T
import torch.nn as nn
from torch.nn import functional as F
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from datetime import datetime
from collections import namedtuple
from Harlow_Simple.harlow impo... | 1,850 | 25.826087 | 117 | py |
Meta-RL-Harlow | Meta-RL-Harlow-master/main_psychlab.py | import os
import yaml
import pickle
import argparse
import numpy as np
import torch as T
import torch.nn as nn
import torch.multiprocessing as mp
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from collections import namedtuple
from common.shared_optim import SharedAdam, SharedRMSprop
from ... | 6,521 | 37.591716 | 114 | py |
Meta-RL-Harlow | Meta-RL-Harlow-master/run_episode.py | import os
import yaml
import pickle
import argparse
import numpy as np
import torch as T
import torch.nn as nn
import torch.nn.functional as F
import torch.multiprocessing as mp
from torch.utils.tensorboard import SummaryWriter
import deepmind_lab as lab
from tqdm import tqdm
from collections import namedtuple
fr... | 5,563 | 35.605263 | 127 | py |
Meta-RL-Harlow | Meta-RL-Harlow-master/main_psychlab_single.py | import os
import yaml
import pickle
import argparse
import numpy as np
import torch as T
import torch.nn as nn
import torch.nn.functional as F
import torch.multiprocessing as mp
from torch.utils.tensorboard import SummaryWriter
import deepmind_lab as lab
from tqdm import tqdm
from collections import namedtuple
fr... | 14,014 | 36.573727 | 114 | py |
Meta-RL-Harlow | Meta-RL-Harlow-master/pretrain/evaluate.py | import torch as T
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
from collections import OrderedDict
from utils import get_test_loader
model_urls = {
'cifar10': 'http://ml.cs.tsinghua.edu.cn/~chenxi/pytorch-models/cifar10-d875770b.pth',
'cifar100': 'http://ml.cs.tsinghua.edu.cn/~chenxi/pytor... | 3,243 | 36.287356 | 122 | py |
Meta-RL-Harlow | Meta-RL-Harlow-master/pretrain/utils.py | """
Create train, valid, test iterators for CIFAR-10 [1].
Easily extended to MNIST, CIFAR-100 and Imagenet.
[1]: https://discuss.pytorch.org/t/feedback-on-pytorch-for-kaggle-competitions/2252/4
"""
import torch
import imageio
import numpy as np
import matplotlib.pyplot as plt
from torchvision import datasets
from to... | 5,546 | 29.646409 | 85 | py |
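The train/valid split such a helper typically wraps is the index-plus-sampler pattern: permute the dataset indices once and hand disjoint slices to SubsetRandomSampler. A hedged sketch (the 90/10 split and batch size are assumptions):

```python
import numpy as np
from torch.utils.data import DataLoader, SubsetRandomSampler
from torchvision import datasets, transforms

ds = datasets.CIFAR10("data", train=True, download=True,
                      transform=transforms.ToTensor())
idx = np.random.permutation(len(ds))
split = int(0.9 * len(ds))  # assumed 90/10 train/valid split
train_loader = DataLoader(ds, batch_size=64,
                          sampler=SubsetRandomSampler(idx[:split]))
valid_loader = DataLoader(ds, batch_size=64,
                          sampler=SubsetRandomSampler(idx[split:]))
```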
Meta-RL-Harlow | Meta-RL-Harlow-master/pretrain/train.py | import os
import yaml
import argparse
import numpy as np
import torch as T
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
from tqdm import tqdm
from copy import deepcopy
from torch.utils.tensorboard import SummaryWriter
from utils import get_trai... | 5,955 | 32.088889 | 160 | py |
Meta-RL-Harlow | Meta-RL-Harlow-master/common/shared_optim.py | import math
import torch as T
import torch.optim as optim
class SharedAdam(optim.Adam):
"""Implements Adam algorithm with shared states.
"""
def __init__(self,
params,
lr=1e-3,
betas=(0.9, 0.999),
eps=1e-8,
weight_decay... | 4,680 | 35.286822 | 135 | py |
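A shared Adam exists because A3C-style workers in separate processes must update one set of optimizer moments. The underlying mechanism is ordinary PyTorch shared memory; the state names below follow torch.optim.Adam, and the snippet is a sketch of the idea rather than the class above.

```python
import torch

state = {
    "step": torch.zeros(1),
    "exp_avg": torch.zeros(10),      # first moment, shared across workers
    "exp_avg_sq": torch.zeros(10),   # second moment, shared across workers
}
for t in state.values():
    t.share_memory_()  # standard tensor API: move storage into shared memory
```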
Meta-RL-Harlow | Meta-RL-Harlow-master/Harlow_PsychLab/train.py | import os
import yaml
import pickle
import argparse
import numpy as np
import torch as T
import torch.nn as nn
from torch.nn import functional as F
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from datetime import datetime
from collections import namedtuple
import deepmind_lab as lab
f... | 10,760 | 31.315315 | 104 | py |
Meta-RL-Harlow | Meta-RL-Harlow-master/models/dnd.py | import torch as T
import torch.nn.functional as F
# constants
ALL_KERNELS = ['cosine', 'l1', 'l2']
ALL_POLICIES = ['1NN']
class DND:
"""The differentiable neural dictionary (DND) class. This enables episodic
recall in a neural network.
notes:
- a memory is a row vector
Parameters
----------
... | 6,304 | 29.756098 | 98 | py |
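With the 'cosine' kernel and '1NN' policy constants above, recall reduces to a similarity lookup over the stored keys. A hedged sketch with made-up tensors (not the class's real method names):

```python
import torch
import torch.nn.functional as F

keys = torch.randn(5, 8)      # five stored memories (row vectors), key dim 8
vals = torch.randn(5, 8)
query = torch.randn(8)

sims = F.cosine_similarity(keys, query.unsqueeze(0), dim=1)  # 'cosine' kernel
recalled = vals[sims.argmax()]                               # '1NN' policy
```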
Meta-RL-Harlow | Meta-RL-Harlow-master/models/ep_lstm.py | from typing import (
Tuple,
List,
Optional,
Dict,
Callable,
Union,
cast,
)
from collections import namedtuple
from abc import ABC, abstractmethod
from dataclasses import dataclass
import numpy as np
import torch as T
from torch import nn
from torch.nn import functional as F
from torch imp... | 3,780 | 26.398551 | 70 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.