repo (string, length 1–99) | file (string, length 13–215) | code (string, length 12–59.2M) | file_length (int64, 12–59.2M) | avg_line_length (float64, 3.82–1.48M) | max_line_length (int64, 12–2.51M) | extension_type (string, 1 class) |
|---|---|---|---|---|---|---|
mix | mix-master/fairseq/data/legacy/block_pair_dataset.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import numpy as np
import torch
from fairseq.data import FairseqDataset
class BlockPairDataset(FairseqDataset):
"""Break a... | 12,878 | 40.146965 | 99 | py |
mix | mix-master/fairseq/data/legacy/masked_lm_dataset.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import numpy as np
import torch
from typing import Dict, List, Tuple
from fairseq.data import FairseqDataset, data_utils
from ... | 12,468 | 37.603715 | 83 | py |
mix | mix-master/fairseq/tasks/translation_from_pretrained_bart.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from fairseq.data import LanguagePairDataset
from .translation import load_langpair_dataset, TranslationTask
from . import regi... | 4,719 | 40.403509 | 109 | py |
mix | mix-master/fairseq/tasks/translation_self_distill.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from argparse import Namespace
import json
import itertools
import logging
import os
import torch
import numpy as np
from fairseq import met... | 20,267 | 42.493562 | 126 | py |
mix | mix-master/fairseq/tasks/language_modeling.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import numpy as np
import torch
from fairseq import utils
from fairseq.data import (
data_utils,
Dictionary... | 10,106 | 36.712687 | 112 | py |
mix | mix-master/fairseq/tasks/translation_struct.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import itertools
import os
import... | 10,110 | 39.606426 | 97 | py |
mix | mix-master/fairseq/tasks/multilingual_masked_lm.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import numpy as np
import torch
from fairseq.data import (
data_utils,
Dictionary,
encoders,
Concat... | 12,616 | 38.676101 | 98 | py |
mix | mix-master/fairseq/tasks/multilingual_translation.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import OrderedDict
import logging
import os
import torch
from fairseq import metrics, options
from fairseq.data import (
... | 15,113 | 43.322581 | 116 | py |
mix | mix-master/fairseq/tasks/translation_lev.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import torch
from fairseq.utils import new_arange
from fairseq.tasks import register_task
from fairseq.tasks.translation import Tr... | 6,640 | 39.993827 | 87 | py |
mix | mix-master/fairseq/tasks/fairseq_task.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import warnings
import torch
from fairseq import metrics, search, tokenizer, utils
from fairseq.data import data_utils, FairseqDataset, iter... | 15,928 | 36.21729 | 87 | py |
mix | mix-master/docs/conf.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# fairseq documentation build configuration file, created by
# sphinx-quickstart on Fri Aug 17 21:45:30 2018.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# au... | 4,235 | 30.849624 | 80 | py |
mix | mix-master/fairseq_cli/generate.py | #!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Translate pre-processed data with a trained model.
"""
import logging
import math
import os
import sys
import t... | 10,264 | 37.302239 | 110 | py |
mix | mix-master/fairseq_cli/validate.py | #!/usr/bin/env python3 -u
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import sys
import torch
from fairseq import checkpoint_utils, options, ut... | 3,706 | 30.415254 | 88 | py |
mix | mix-master/fairseq_cli/eval_lm.py | #!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Evaluate the perplexity of a trained language model.
"""
import logging
import math
import os
import torch
fr... | 8,462 | 32.717131 | 112 | py |
mix | mix-master/fairseq_cli/interactive.py | #!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Translate raw text with a trained model. Batches data on-the-fly.
"""
from collections import namedtuple
import ... | 7,270 | 32.353211 | 103 | py |
mix | mix-master/fairseq_cli/train.py | #!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Train a new model on one or across multiple GPUs.
"""
import logging
import math
import os
import random
import ... | 11,933 | 35.054381 | 117 | py |
transmatching | transmatching-main/setup.py | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="transmatching", # Replace with your own username
version="0.0.1",
author="Example Author",
author_email="author@example.com",
description="A small example package",
long_description=l... | 1,064 | 26.307692 | 59 | py |
transmatching | transmatching-main/evaluation/evaluate.py | import itertools
import json
from pathlib import Path
from typing import Dict, Optional, Sequence, Union
import hydra
import igl
import meshio
import numpy as np
import omegaconf
from matplotlib import pyplot as plt
from pytorch_lightning import seed_everything
from scipy import sparse
from scipy.sparse.csgraph import... | 11,713 | 28.959079 | 89 | py |
transmatching | transmatching-main/evaluation/utils.py | import os
from pathlib import Path
from typing import Optional, Union
import dotenv
import git
import hydra
import numpy as np
import omegaconf
import torch
from hydra.core.global_hydra import GlobalHydra
from hydra.experimental import compose
from matplotlib import pyplot as plt
from plotly.graph_objs import Layout
... | 19,194 | 26.539455 | 105 | py |
transmatching | transmatching-main/evaluation/predict.py | import itertools
from typing import Dict, Union
import hydra
import meshio
import numpy as np
import omegaconf
from plotly import graph_objects as go
from pytorch_lightning import seed_everything
from tqdm import tqdm
from evaluation.competitors.eval_dataset import EvalDataset
from evaluation.utils import PROJECT_ROO... | 3,844 | 27.69403 | 85 | py |
transmatching | transmatching-main/evaluation/competitors/eval_dataset.py | import json
from typing import Dict
import meshio
import numpy as np
from torch.utils.data import Dataset
from evaluation.utils import PROJECT_ROOT
class EvalDataset(Dataset):
def __init__(self, dataset_name: str):
"""
A generic dataset that is able to read every dataset that follows the structu... | 2,406 | 28.353659 | 88 | py |
transmatching | transmatching-main/evaluation/competitors/our/our.py | from typing import Dict
import meshio
import numpy as np
import scipy.io
import torch
from transmatching.Model.model import Model
from transmatching.Utils.refine import refine, refine_hires
from evaluation.competitors.eval_dataset import EvalDataset
from evaluation.competitors.eval_model import ModelMatching
from eva... | 9,955 | 31.75 | 143 | py |
transmatching | transmatching-main/evaluation/competitors/our_refined/our_refined.py | from pytorch_lightning import seed_everything
from evaluation.competitors.eval_dataset import EvalDataset
from evaluation.competitors.our.our import OurMatching
class OurMatchingRefined(OurMatching):
def __init__(self, **kwargs) -> None:
super(OurMatchingRefined, self).__init__(refine=True, **kwargs)
... | 1,691 | 28.684211 | 80 | py |
transmatching | transmatching-main/evaluation/datasets/faust_1k_s2t/generate.py | import json
from pathlib import Path
import meshio
import numpy as np
from pytorch_lightning import seed_everything
from scipy import io
from tqdm import tqdm
from evaluation.utils import PROJECT_ROOT, Mesh, plot_meshes
N_PAIRS = 100
FAUST_REM = Path(PROJECT_ROOT / "evaluation/datasets/faust_1k/FAUSTS_rem.mat")
TEMP... | 1,753 | 29.241379 | 94 | py |
transmatching | transmatching-main/evaluation/datasets/faust/generate.py | import json
from pathlib import Path
import meshio
import numpy as np
from pytorch_lightning import seed_everything
from tqdm import tqdm
FAUST_PATH = Path("/run/media/luca/LocalDisk/Datasets/MPI-FAUST/training/registrations")
assert FAUST_PATH.exists(), "Do not regenerate! Download from Drive or DVC."
N_PAIRS = 100... | 1,559 | 30.836735 | 94 | py |
transmatching | transmatching-main/evaluation/datasets/faust_1k/generate.py | import json
from pathlib import Path
import meshio
import numpy as np
from meshio import Mesh
from pytorch_lightning import seed_everything
from scipy import io
from tqdm import tqdm
from evaluation.utils import PROJECT_ROOT
N_PAIRS = 100
FAUST_REM = Path("/home/luca/Desktop/FAUSTS_rem.mat")
seed_everything(0)
sha... | 1,649 | 29 | 94 | py |
transmatching | transmatching-main/evaluation/datasets/faust_1k_outliers/generate.py | import json
from pathlib import Path
import meshio
import numpy as np
from pytorch_lightning import seed_everything
from scipy import io
from scipy.spatial.transform import Rotation as R
from tqdm import tqdm
from evaluation.utils import PROJECT_ROOT, Mesh, plot_meshes
N_PAIRS = 100
FAUST_0NOISE = Path(
PROJECT_... | 2,476 | 27.802326 | 94 | py |
transmatching | transmatching-main/evaluation/datasets/faust_1k_noise/generate.py | import json
from pathlib import Path
import meshio
import numpy as np
from meshio import Mesh
from pytorch_lightning import seed_everything
from scipy import io
from tqdm import tqdm
from evaluation.utils import PROJECT_ROOT
N_PAIRS = 100
FAUST_NOISE = Path(
PROJECT_ROOT / "evaluation/datasets/faust_1k_noise/FAU... | 1,729 | 27.833333 | 94 | py |
transmatching | transmatching-main/evaluation/datasets/faust_permuted/generate.py | import json
from pathlib import Path
import meshio
import numpy as np
from meshio import Mesh
from plotly import graph_objects as go
from pytorch_lightning import seed_everything
from tqdm import tqdm
from evaluation.utils import PROJECT_ROOT
FAUST_PATH = Path("/run/media/luca/LocalDisk/Datasets/MPI-FAUST/training/r... | 2,803 | 27.612245 | 94 | py |
transmatching | transmatching-main/evaluation/datasets/faust_s2t/generate.py | import json
from pathlib import Path
import meshio
import numpy as np
from pytorch_lightning import seed_everything
from scipy.io import loadmat
from tqdm import tqdm
from evaluation.utils import PROJECT_ROOT, Mesh, invert_permutation, plot_meshes
FAUST_PATH = Path("/run/media/luca/LocalDisk/Datasets/MPI-FAUST/train... | 1,995 | 32.266667 | 94 | py |
transmatching | transmatching-main/evaluation/datasets/shrec19/generate.py | import json
from pathlib import Path
import meshio
import numpy as np
import scipy
from meshio import Mesh
from plotly import graph_objects as go
from pytorch_lightning import seed_everything
from scipy import io
from tqdm import tqdm
from evaluation.utils import PROJECT_ROOT
seed_everything(0)
SHREC_PATH = Path(PR... | 1,681 | 29.035714 | 95 | py |
transmatching | transmatching-main/evaluation/datasets/faust_1k_0noise/generate.py | import json
from pathlib import Path
import meshio
import numpy as np
from meshio import Mesh
from pytorch_lightning import seed_everything
from scipy import io
from tqdm import tqdm
from evaluation.utils import PROJECT_ROOT
N_PAIRS = 100
FAUST_0NOISE = Path(
PROJECT_ROOT / "evaluation/datasets/faust_1k_0noise/F... | 1,733 | 27.9 | 94 | py |
transmatching | transmatching-main/evaluation/ui/generate_point_colors.py | import numpy as np
import streamlit as st
from pytorch_lightning import seed_everything
from stqdm import stqdm
from evaluation.competitors.eval_dataset import EvalDataset
from evaluation.utils import (
PROJECT_ROOT,
Mesh,
convert_colors,
get_dists,
get_hydra_cfg,
get_point_colors,
plot_mes... | 3,605 | 24.394366 | 90 | py |
transmatching | transmatching-main/transmatching/Data/dataset_faust.py | import numpy as np
import os
import torch
import trimesh
from torch.utils.data import Dataset
from scipy.io import loadmat
from transmatching.Utils.utils import RandomRotateCustom, est_area
class FaustDataset(Dataset):
def __init__(self, in_path, area=True):
self.in_path = in_path
self.area = are... | 1,705 | 31.188679 | 141 | py |
transmatching | transmatching-main/transmatching/Data/dataset_smpl.py | import numpy as np
import os
import torch
import trimesh
from torch.utils.data import Dataset
from transmatching.Utils.utils import RandomRotateCustom, est_area
class SMPLDataset(Dataset):
def __init__(self, in_path, train=True, area=True):
self.in_path = in_path
self.train = train
self... | 1,668 | 31.72549 | 111 | py |
transmatching | transmatching-main/transmatching/Model/feedforward.py | from torch import nn
import torch.nn.functional as F
class FeedForward(nn.Module):
def __init__(self, d_model, d_ff=32, dropout=0.05):
super().__init__()
self.linear_1 = nn.Linear(d_model, d_ff)
self.dropout = nn.Dropout(dropout)
self.linear_2 = nn.Linear(d_ff, d_model)
def f... | 435 | 23.222222 | 55 | py |
transmatching | transmatching-main/transmatching/Model/norm.py | import torch
from torch import nn
class Norm(nn.Module):
def __init__(self, d_model, eps=1e-06):
super().__init__()
self.size = d_model
self.alpha = nn.Parameter(torch.ones(self.size))
self.bias = nn.Parameter(torch.zeros(self.size))
self.eps = eps
def forward(self, x... | 467 | 23.631579 | 121 | py |
transmatching | transmatching-main/transmatching/Model/layernorm.py | from torch import nn
class AddNorm(nn.Module):
def __init__(self, normalized_shape, dropout):
super().__init__()
self.dropout = nn.Dropout(dropout)
self.ln = nn.LayerNorm(normalized_shape)
def forward(self, X, Y):
return self.ln(self.dropout(Y) + X)
| 296 | 18.8 | 50 | py |
transmatching | transmatching-main/transmatching/Model/model.py | import torch
from torch import nn
from transmatching.Model.decoder import Decoder
from transmatching.Model.encoder import Encoder
from transmatching.Model.attention import MultiHeadAttention
from transmatching.Model.feedforward import FeedForward
from transmatching.Model.layernorm import AddNorm
from transmatching.Mo... | 2,138 | 37.890909 | 133 | py |
transmatching | transmatching-main/transmatching/Model/encoder.py | from torch import nn
from transmatching.Model.attention import MultiHeadAttention
from transmatching.Model.feedforward import FeedForward
from transmatching.Model.layernorm import AddNorm
from transmatching.Model.norm import Norm
from transmatching.Model.pos_enc import PositionalEncoderLearnt
import torch
from transmat... | 2,357 | 35.84375 | 119 | py |
transmatching | transmatching-main/transmatching/Model/decoder.py | import torch
from torch import nn
from transmatching.Model.attention import MultiHeadAttention
from transmatching.Model.feedforward import FeedForward
from transmatching.Model.layernorm import AddNorm
from transmatching.Model.norm import Norm
from transmatching.Model.pos_enc import PositionalEncoderLearnt
from transmat... | 2,412 | 35.014925 | 99 | py |
transmatching | transmatching-main/transmatching/Model/attention.py | import math
import torch
from torch import nn
import torch.nn.functional as F
from transmatching.Model.debug import Debug
try:
from pykeops.torch import LazyTensor
except ImportError:
Debug.keops=False
def attention(q, k, v, d_k, mask=None, dropout=None, weights=None, w=1):
if Debug.keops:
bs = ... | 2,928 | 28.887755 | 127 | py |
transmatching | transmatching-main/transmatching/Model/pos_enc.py | import torch
from torch import nn
class PositionalEncoderLearnt(nn.Module):
def __init__(self, d_model, max_seq_len):
super().__init__()
self.pos = nn.Parameter(torch.zeros(max_seq_len, d_model))
def forward(self, x):
seq_len = x.size(-2)
x = x + self.pos[:seq_len]
re... | 328 | 19.5625 | 66 | py |
transmatching | transmatching-main/transmatching/Utils/refine.py | import torch
# from transmatching.Model.model import Model
import matplotlib.pyplot as plt
import time
import gc
from transmatching.Utils.utils import get_clones, est_area, chamfer_loss
from transmatching.Model.debug import Debug
def chamfer(y_hat,src):
dist = torch.cdist(y_hat,src)
loss = dist.min(-2)[0].mea... | 7,206 | 31.463964 | 88 | py |
transmatching | transmatching-main/transmatching/Utils/utils.py | import igl
import torch
import matplotlib.pyplot as plt
import plotly.graph_objects as go
import numpy as np
from plotly.subplots import make_subplots
from torch import nn
import copy
from transmatching.Model.debug import Debug
from scipy import sparse
from scipy.sparse.csgraph import dijkstra
from scipy.spatial.distan... | 7,124 | 27.846154 | 120 | py |
transmatching | transmatching-main/test/test.py | import torch
from tqdm import tqdm
from transmatching.Model.model import Model
from argparse import ArgumentParser
from transmatching.Utils.utils import get_errors, area_weighted_normalization, chamfer_loss, approximate_geodesic_distances
import numpy as np
from pytorch_lightning import seed_everything
from scipy.io im... | 3,814 | 29.766129 | 123 | py |
transmatching | transmatching-main/test/train.py | import os
import time
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transmatching.Data.dataset_smpl import SMPLDataset
from transmatching.Model.model import Model
from argparse import ArgumentParser
def main(args):
# ------------------------------------------------------------------... | 4,167 | 31.310078 | 120 | py |
BeatNet | BeatNet-main/setup.py | """
Created 07-01-21 by Mojtaba Heydari
"""
# Local imports
# None.
# Third party imports
# None.
# Python standard library imports
import setuptools
from setuptools import find_packages
import distutils.cmd
# Required packages
REQUIRED_PACKAGES = [
'numpy',
'cython',
'librosa>=0.8.0',
'numba==0.5... | 1,983 | 22.619048 | 167 | py |
BeatNet | BeatNet-main/src/BeatNet/BeatNet.py | # This is the script handler of the BeatNet. First, it extracts the input embeddings of the current frame or the whole song, depending on the working mode.
# Then by feeding them into the selected pre-trained model, it calculates the beat/downbeat activation probabilities.
# Finally, it infers beats and downbeats of t... | 12,681 | 58.539906 | 296 | py |
BeatNet | BeatNet-main/src/BeatNet/model.py | import torch.nn as nn
import torch
import torch.nn.functional as F
import numpy as np
class BDA(nn.Module): #beat_downbeat_activation
def __init__(self, dim_in, num_cells, num_layers, device):
super(BDA, self).__init__()
self.dim_in = dim_in
self.dim_hd = num_cells
self.num_layer... | 2,088 | 34.40678 | 132 | py |
PaperRobot | PaperRobot-master/New paper writing/test.py | import gc
import os
import time
import torch
import pickle
import argparse
import torch.nn as nn
from eval_final import Evaluate
from loader.preprocessing import prepare_mapping, AssembleMem, printcand, filter_stopwords
from loader.loader import load_file_with_terms
from memory_generator.seq2seq import Seq2seq
from m... | 3,280 | 28.558559 | 103 | py |
PaperRobot | PaperRobot-master/New paper writing/train.py | import gc
import os
import sys
import time
import torch
import pickle
import argparse
import torch.nn as nn
from collections import OrderedDict
from eval import Evaluate
from loader.logger import Tee
from loader.loader import load_file_with_terms
from loader.preprocessing import prepare_mapping, AssembleMem
from util... | 9,621 | 32.065292 | 116 | py |
PaperRobot | PaperRobot-master/New paper writing/input.py | import torch
import pickle
import argparse
import torch.nn as nn
from loader.preprocessing import prepare_mapping, filter_stopwords
from memory_generator.seq2seq import Seq2seq
from memory_generator.Encoder import EncoderRNN
from memory_generator.Encoder import TermEncoder
from memory_generator.predictor import Predic... | 4,304 | 30.888889 | 111 | py |
PaperRobot | PaperRobot-master/New paper writing/loader/preprocessing.py | from collections import Counter
import torch
import json
import string
# Mask variable
def _mask(prev_generated_seq, device, eos_id):
prev_mask = torch.eq(prev_generated_seq, eos_id)
lengths = torch.argmax(prev_mask, dim=1)
max_len = prev_generated_seq.size(1)
mask = []
for i in range(prev_generat... | 11,373 | 42.746154 | 1,494 | py |
PaperRobot | PaperRobot-master/New paper writing/loader/loader.py | import os
import json
import gzip
import lzma
import torch
import torch.nn as nn
from loader.preprocessing import create_mapping
def load_files(path):
sources = []
targets = []
words = []
for line in open(path, 'r'):
line = line.strip()
file = json.loads(line)
sources.append(f... | 6,934 | 27.539095 | 121 | py |
PaperRobot | PaperRobot-master/New paper writing/utils/optim.py | import torch.optim as optim
def get_optimizer(model, lr_method, lr_rate):
"""
parse optimization method parameters, and initialize optimizer function
"""
lr_method_name = lr_method
# initialize optimizer function
if lr_method_name == 'sgd':
optimizer = optim.SGD(model.parameters(), lr... | 901 | 36.583333 | 85 | py |
PaperRobot | PaperRobot-master/New paper writing/memory_generator/Decoder.py | import sys
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from .baseRNN import BaseRNN
from .utils import MemoryComponent
class DecoderRNN(BaseRNN):
def __init__(self, vocab_size, embedding, word_dim, sos_id, eos_id, unk_id,
max_len=150, input_dropout_p=0, l... | 18,992 | 50.332432 | 120 | py |
PaperRobot | PaperRobot-master/New paper writing/memory_generator/predictor.py | import gc
from itertools import groupby
import torch
import statistics
def filter_duplicate(sents):
sents = sents.split('.')
used = []
used_s = []
tmp = ""
for ss in sents:
tttmp = ''
for s in ss.split(','):
if s not in used:
if len(s) < 2:
... | 7,888 | 37.862069 | 111 | py |
PaperRobot | PaperRobot-master/New paper writing/memory_generator/seq2seq.py | import torch.nn as nn
class Seq2seq(nn.Module):
def __init__(self, ref_encoder, term_encoder, decoder):
super(Seq2seq, self).__init__()
self.ref_encoder = ref_encoder
self.term_encoder = term_encoder
self.decoder = decoder
def forward(self, batch_s, batch_o_s, source_len, max... | 924 | 45.25 | 113 | py |
PaperRobot | PaperRobot-master/New paper writing/memory_generator/baseRNN.py | """ A base class for RNN. """
import torch.nn as nn
class BaseRNN(nn.Module):
def __init__(self, vocab_size, hidden_size, input_dropout_p, n_layers, rnn_cell):
super(BaseRNN, self).__init__()
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.n_layers = n_layers
... | 735 | 32.454545 | 85 | py |
PaperRobot | PaperRobot-master/New paper writing/memory_generator/utils.py | import torch
import copy
import math
import torch.nn as nn
import torch.nn.functional as F
def clones(module, N):
"Produce N identical layers."
return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])
class MemoryComponent(nn.Module):
def __init__(self, hop, h, d_model, dropout_p):
super... | 1,774 | 38.444444 | 118 | py |
PaperRobot | PaperRobot-master/New paper writing/memory_generator/Encoder.py | import torch.nn as nn
from .baseRNN import BaseRNN
class EncoderRNN(BaseRNN):
def __init__(self, vocab_size, embedding, hidden_size, input_dropout_p,
n_layers=1, bidirectional=True, rnn_type='gru'):
super(EncoderRNN, self).__init__(vocab_size, hidden_size, input_dropout_p, n_layers, rnn_... | 1,429 | 31.5 | 102 | py |
PaperRobot | PaperRobot-master/Existing paper reading/test.py | from __future__ import division
from __future__ import print_function
import time
import argparse
import numpy as np
import torch
import torch.nn.functional as F
import torch.optim as optim
from torch.utils import data
import os, sys, math, pickle, gc
from utils.utils import convert_index, get_subgraph, adjust_sent_o... | 7,048 | 38.161111 | 125 | py |
PaperRobot | PaperRobot-master/Existing paper reading/train.py | from __future__ import division
from __future__ import print_function
import time
import argparse
import numpy as np
import torch
import torch.nn.functional as F
import torch.optim as optim
from torch.utils import data
import os, sys, math, pickle, gc
from utils.utils import convert_index, get_subgraph, adjust_sent_o... | 10,576 | 35.347079 | 121 | py |
PaperRobot | PaperRobot-master/Existing paper reading/utils/data_loader.py | import torch
from torch.utils.data import Dataset, DataLoader
import numpy as np
from .utils import generate_corrupt_triples, load_triple_dict
from torch.utils import data
class LinkPredictionDataset(Dataset):
def __init__(self, kg_file, txt_file, id2ent, num_ent):
self.triples, self.triple_dict, self.tri... | 2,949 | 34.119048 | 116 | py |
PaperRobot | PaperRobot-master/Existing paper reading/utils/utils.py | import numpy as np
from sys import getsizeof
import torch
import math
import networkx as nx
import json
import pickle
import codecs
from collections import defaultdict, Counter
class KnowledgeGraph:
def __init__(self):
self.G = nx.DiGraph()
self.triples = []
def load_file(self, fn, delimiter,... | 7,972 | 27.783394 | 104 | py |
PaperRobot | PaperRobot-master/Existing paper reading/model/TAT.py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
class TAT(nn.Module):
"""
A Bi-LSTM layer with attention
"""
def __init__(self, embedding_dim, voc_size):
... | 1,173 | 36.870968 | 82 | py |
PaperRobot | PaperRobot-master/Existing paper reading/model/graph_attention.py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
class GraphAttentionLayer(nn.Module):
"""
Simple GAT layer, similar to https://arxiv.org/abs/1710.10903
"""
def __init__(self, in_features, out_features, dropout, alpha, concat=True):
super(GraphAttentionLay... | 1,938 | 40.255319 | 220 | py |
PaperRobot | PaperRobot-master/Existing paper reading/model/GAT.py | import torch.nn as nn
import torch
import torch.nn.functional as F
from .graph_attention import GraphAttentionLayer
class GAT(nn.Module):
def __init__(self, nfeat, nhid, dropout, alpha, nheads):
super(GAT, self).__init__()
self.dropout = dropout
self.attentions = [GraphAttentionLayer(nfeat... | 763 | 35.380952 | 126 | py |
PaperRobot | PaperRobot-master/Existing paper reading/model/GATA.py | # --------- Link Prediction Model with both TAT and GAT contained -----------
import torch.nn as nn
import torch
from .GAT import GAT
from .TAT import TAT
class GATA(nn.Module):
def __init__(self, emb_dim, hid_dim, out_dim, num_voc, num_heads, num_ent, num_rel, dropout, alpha, **kwargs):
super(GATA, self)... | 1,737 | 41.390244 | 114 | py |
speech-resynthesis | speech-resynthesis-main/inference.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# Adapted from https://github.com/jik876/hifi-gan
import argparse
import glob
import json
import os
import random
import s... | 10,958 | 32.411585 | 121 | py |
speech-resynthesis | speech-resynthesis-main/utils.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# Adapted from https://github.com/jik876/hifi-gan
import glob
import os
import shutil
import matplotlib
import torch
from... | 2,008 | 24.1125 | 64 | py |
speech-resynthesis | speech-resynthesis-main/dataset.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# Adapted from https://github.com/jik876/hifi-gan
import random
from pathlib import Path
import amfm_decompy.basic_tools ... | 15,169 | 33.555809 | 115 | py |
speech-resynthesis | speech-resynthesis-main/train_f0_vq.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# Adapted from https://github.com/jik876/hifi-gan
import warnings
warnings.simplefilter(action='ignore', category=FutureW... | 8,872 | 39.701835 | 115 | py |
speech-resynthesis | speech-resynthesis-main/models.py | # adapted from https://github.com/jik876/hifi-gan
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
from modules.jukebox import Encoder, Decoder
from utils import ... | 14,478 | 36.31701 | 120 | py |
speech-resynthesis | speech-resynthesis-main/infer_vqvae_codes.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import json
import os
import random
import sys
from multiprocessing import Manager, Pool
from pathlib impor... | 3,868 | 24.123377 | 86 | py |
speech-resynthesis | speech-resynthesis-main/train.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# Adapted from https://github.com/jik876/hifi-gan
import warnings
warnings.simplefilter(action='ignore', category=FutureWa... | 15,924 | 46.966867 | 137 | py |
speech-resynthesis | speech-resynthesis-main/modules/vq.py | # Adapted from https://github.com/openai/jukebox
import numpy as np
import torch as t
import torch.nn as nn
import torch.nn.functional as F
import modules.dist as dist
class BottleneckBlock(nn.Module):
def __init__(self, k_bins, emb_width, mu):
super().__init__()
self.k_bins = k_bins
sel... | 8,566 | 33.268 | 120 | py |
speech-resynthesis | speech-resynthesis-main/modules/resnet.py | # Adapted from https://github.com/openai/jukebox
import math
import torch.nn as nn
import modules.dist as dist
class ResConvBlock(nn.Module):
def __init__(self, n_in, n_state):
super().__init__()
self.model = nn.Sequential(
nn.ReLU(),
nn.Conv2d(n_in, n_state, 3, 1, 1),
... | 2,603 | 30.373494 | 110 | py |
speech-resynthesis | speech-resynthesis-main/modules/dist.py | # Adapted from https://github.com/openai/jukebox
from enum import Enum
import torch.distributed as dist
class ReduceOp(Enum):
SUM = 0,
PRODUCT = 1,
MIN = 2,
MAX = 3
def ToDistOp(self):
return {
self.SUM: dist.ReduceOp.SUM,
self.PRODUCT: dist.ReduceOp.PRODUCT,
... | 2,013 | 17.477064 | 56 | py |
speech-resynthesis | speech-resynthesis-main/modules/jukebox.py | # Adapted from https://github.com/openai/jukebox
import numpy as np
import torch.nn as nn
from modules.resnet import Resnet1D
def assert_shape(x, exp_shape):
assert x.shape == exp_shape, f"Expected {exp_shape} got {x.shape}"
class EncoderConvBlock(nn.Module):
def __init__(self, input_emb_width, output_emb_... | 7,855 | 42.888268 | 120 | py |
speech-resynthesis | speech-resynthesis-main/examples/speech_to_speech_translation/inference.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# Adapted from https://github.com/jik876/hifi-gan
import argparse
import glob
import json
import os
import random
import s... | 9,266 | 31.861702 | 121 | py |
speech-resynthesis | speech-resynthesis-main/examples/speech_to_speech_translation/models.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# Adapted from https://github.com/jik876/hifi-gan
import torch
import torch.nn as nn
import torch.nn.functional as F
from... | 4,834 | 35.908397 | 96 | py |
speech-resynthesis | speech-resynthesis-main/examples/speech_to_speech_translation/train.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# Adapted from https://github.com/jik876/hifi-gan
import warnings
warnings.simplefilter(action='ignore', category=FutureWa... | 14,559 | 45.967742 | 137 | py |
speech-resynthesis | speech-resynthesis-main/scripts/parse_vqvae_codes.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import random
from pathlib import Path
import torchaudio
from tqdm import tqdm
def parse_manifest(manife... | 3,321 | 28.39823 | 70 | py |
t-leap | t-leap-main/test.py | import os
from datetime import datetime
# scipy imports
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Pytorch imports
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from datasets.seq_pose_dataset import SequentialPoseDataset
from torch.optim import Adam, lr_sched... | 5,451 | 34.633987 | 138 | py |
t-leap | t-leap-main/train_seq.py | # #############################################################################
# Copyright 2022 Helena Russello
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/l... | 11,265 | 39.235714 | 176 | py |
t-leap | t-leap-main/core/evaluate.py | # ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Source: https://github.com/microsoft/human-pose-estimation.pytorch/
# Written by Bin Xiao (Bin.Xiao@microsoft.com)
# Adapted by Helena Russello (helena@russello.dev)
# ---------... | 4,643 | 38.355932 | 107 | py |
t-leap | t-leap-main/models/tleap.py | # #############################################################################
# Copyright 2022 Helena Russello
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/l... | 7,665 | 43.312139 | 144 | py |
t-leap | t-leap-main/datasets/seq_pose_dataset.py | # #############################################################################
# Copyright 2022 Helena Russello
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/l... | 23,293 | 40.155477 | 178 | py |
t-leap | t-leap-main/utils/data_utils.py | # #############################################################################
# Copyright 2022 Helena Russello
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/l... | 7,253 | 37.585106 | 171 | py |
t-leap | t-leap-main/utils/plotting_utils.py | # #############################################################################
# Copyright 2022 Helena Russello
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/l... | 4,876 | 38.016 | 173 | py |
t-leap | t-leap-main/utils/train_utils.py | # #############################################################################
# Copyright 2022 Helena Russello
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/l... | 2,444 | 29.949367 | 125 | py |
ditto | ditto-master/train_ditto.py | import os
import argparse
import json
import sys
import torch
import numpy as np
import random
sys.path.insert(0, "Snippext_public")
from ditto_light.dataset import DittoDataset
from ditto_light.summarize import Summarizer
from ditto_light.knowledge import *
from ditto_light.ditto import train
if __name__=="__main__... | 3,355 | 35.086022 | 79 | py |
ditto | ditto-master/matcher.py | import torch
import torch.nn as nn
import os
import numpy as np
import random
import json
import jsonlines
import csv
import re
import time
import argparse
import sys
import sklearn
import traceback
from torch.utils import data
from tqdm import tqdm
from apex import amp
from scipy.special import softmax
from ditto_li... | 10,842 | 31.175074 | 110 | py |
ditto | ditto-master/ditto_light/dataset.py | import torch
from torch.utils import data
from transformers import AutoTokenizer
from .augment import Augmenter
# map lm name to huggingface's pre-trained model names
lm_mp = {'roberta': 'roberta-base',
'distilbert': 'distilbert-base-uncased'}
def get_tokenizer(lm):
if lm in lm_mp:
return AutoT... | 3,634 | 29.805085 | 85 | py |
ditto | ditto-master/ditto_light/ditto.py | import os
import sys
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import random
import numpy as np
import sklearn.metrics as metrics
import argparse
from .dataset import DittoDataset
from torch.utils import data
from transformers import AutoModel, AdamW, get_linear_sch... | 7,593 | 31.592275 | 86 | py |
ditto | ditto-master/blocking/train_blocker.py | import os
import argparse
import json
import sys
import math
sys.path.insert(0, "sentence-transformers")
from sentence_transformers.readers import InputExample
from sentence_transformers import models, losses
from sentence_transformers import SentencesDataset, LoggingHandler, SentenceTransformer
from sentence_transfo... | 3,755 | 34.433962 | 104 | py |
LiteFlowNet2 | LiteFlowNet2-master/models/testing/test_iter.py | #!/usr/bin/env python
import os, sys
import subprocess
from math import ceil
caffe_bin = 'bin/caffe.bin'
img_size_bin = 'bin/get_image_size'
template = './deploy_MODEL.prototxt' # MODEL = LiteFlowNet2-ft-sintel or LiteFlowNet2-ft-kitti
cnn_model = 'MODEL'
# =========================================================
d... | 4,781 | 31.09396 | 147 | py |
LiteFlowNet2 | LiteFlowNet2-master/models/testing/test_batch.py | #!/usr/bin/env python
import os, sys
import subprocess
from math import ceil
caffe_bin = 'bin/caffe.bin'
img_size_bin = 'bin/get_image_size'
template = './deploy_MODEL.prototxt' # MODEL = LiteFlowNet2-ft-sintel or LiteFlowNet2-ft-kitti
cnn_model = 'MODEL'
# =========================================================
d... | 3,970 | 27.775362 | 108 | py |