repo (string, 1–99 chars) | file (string, 13–215 chars) | code (string, 12–59.2M chars) | file_length (int64, 12–59.2M) | avg_line_length (float64, 3.82–1.48M) | max_line_length (int64, 12–2.51M) | extension_type (stringclasses, 1) |
|---|---|---|---|---|---|---|
Voxurf | Voxurf-main/run.py | import os, sys, copy, glob, json, time, random, argparse, cv2
from shutil import copyfile
from tqdm import tqdm, trange
import math
import mmcv
import imageio
import numpy as np
import trimesh
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from datetime import datetime
from lib impor... | 56,095 | 49.130474 | 210 | py |
Voxurf | Voxurf-main/tools/preprocess/convert_cameras.py | import numpy as np
# import matplotlib.image as mpimg
# import matplotlib.pyplot as plt
# import cv2
# import argparse
# from glob import glob
import torch
import os
import argparse
import glob
import imageio
def _load_colmap(basedir, convert=True, suffix=''):
poses_arr = np.load(os.path.join(basedir, 'poses_bou... | 9,091 | 38.021459 | 116 | py |
Voxurf | Voxurf-main/lib/load_dtu.py | import torch
import torch.nn.functional as F
import cv2 as cv
import numpy as np
import os
from glob import glob
from icecream import ic
from scipy.spatial.transform import Rotation as Rot
from scipy.spatial.transform import Slerp
import imageio
# This function is borrowed from IDR: https://github.com/lioryariv/idr
de... | 7,606 | 41.497207 | 120 | py |
Voxurf | Voxurf-main/lib/dvgo_ori.py | import os
import time
import functools
import numpy as np
import cv2
import mcubes
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
'''Model'''
class DirectVoxGO(torch.nn.Module):
def __init__(self, xyz_min, xyz_max,
num_voxels=0, num_voxels_base=... | 32,546 | 45.231534 | 149 | py |
Voxurf | Voxurf-main/lib/load_nsvf.py | import os
import glob
import torch
import numpy as np
import imageio
import json
import torch.nn.functional as F
import cv2
trans_t = lambda t : torch.Tensor([
[1,0,0,0],
[0,1,0,0],
[0,0,1,t],
[0,0,0,1]]).float()
rot_phi = lambda phi : torch.Tensor([
[1,0,0,0],
[0,np.cos(phi),-np.sin(phi),0],... | 1,712 | 26.629032 | 115 | py |
Voxurf | Voxurf-main/lib/voxurf_womask_coarse.py | import os
import time
import functools
import numpy as np
import cv2
import math
import random
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
from lib.dvgo_ori import extract_geometry
import copy
# import MinkowskiEngine as Me
from . import grid
from torch_scatter im... | 44,856 | 42.720273 | 162 | py |
Voxurf | Voxurf-main/lib/load_blendedmvs.py | import os
import glob
import torch
import numpy as np
import imageio
import json
import torch.nn.functional as F
import cv2
def load_blendedmvs_data(basedir):
pose_paths = sorted(glob.glob(os.path.join(basedir, 'pose', '*txt')))
rgb_paths = sorted(glob.glob(os.path.join(basedir, 'rgb', '*png')))
all_pose... | 1,312 | 30.261905 | 118 | py |
Voxurf | Voxurf-main/lib/voxurf_womask_fine.py | import os
import time
import functools
import numpy as np
from copy import deepcopy
import cv2
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
from lib.dvgo_ori import extract_geometry
import math
import random
import copy
from . import grid
from torch_scatter import s... | 65,267 | 43.61244 | 162 | py |
Voxurf | Voxurf-main/lib/voxurf_coarse.py | import os
import time
import numpy as np
import cv2
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_scatter import segment_coo
from torch.utils.cpp_extension import load
from . import grid
from lib.dvgo_ori import extract_geometry
parent_dir = os.path.dirna... | 40,171 | 42.760349 | 153 | py |
Voxurf | Voxurf-main/lib/load_mobilebrick.py | import torch
import torch.nn.functional as F
import cv2 as cv
import numpy as np
import os
from glob import glob
from icecream import ic
from scipy.spatial.transform import Rotation as Rot
from scipy.spatial.transform import Slerp
import imageio
# This function is borrowed from IDR: https://github.com/lioryariv/idr
de... | 3,429 | 34.360825 | 114 | py |
Voxurf | Voxurf-main/lib/utils.py | import os, math
import numpy as np
import scipy.signal
from typing import List, Optional
from torch import Tensor
import torch
import torch.nn as nn
import torch.nn.functional as F
import cv2
import matplotlib.pyplot as plt
from plyfile import PlyData, PlyElement
import matplotlib.cm as cm
import matplotlib as matplot... | 45,758 | 36.848635 | 132 | py |
Voxurf | Voxurf-main/lib/ref_utils.py | # Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, ... | 6,003 | 35.609756 | 84 | py |
Voxurf | Voxurf-main/lib/load_co3d.py | import os
import json
import gzip
import glob
import torch
import numpy as np
import imageio
import torch.nn.functional as F
import cv2
def load_co3d_data(cfg):
# load meta
with gzip.open(cfg.annot_path, 'rt', encoding='utf8') as zipfile:
annot = [v for v in json.load(zipfile) if v['sequence_name'] =... | 3,135 | 35.465116 | 109 | py |
Voxurf | Voxurf-main/lib/load_tankstemple.py | import os
import glob
import torch
import numpy as np
import imageio
import json
import torch.nn.functional as F
import cv2
def load_tankstemple_data(basedir):
pose_paths = sorted(glob.glob(os.path.join(basedir, 'pose', '*txt')))
rgb_paths = sorted(glob.glob(os.path.join(basedir, 'rgb', '*png')))
all_pos... | 3,813 | 32.752212 | 117 | py |
Voxurf | Voxurf-main/lib/load_scannet.py | import os
import torch
import torch.nn.functional as F
import numpy as np
from glob import glob
import cv2
import random
import imageio
import skimage
def load_rgb(path, normalize_rgb = False):
img = imageio.imread(path)
img = skimage.img_as_float32(img)
# if normalize_rgb: # [-1,1] --> [0,1]
# ... | 6,003 | 31.106952 | 153 | py |
Voxurf | Voxurf-main/lib/voxurf_fine.py | import os
import time
import numpy as np
from copy import deepcopy
import cv2
import math
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
from lib.dvgo_ori import extract_geometry
from torch_scatter import segment_coo
from . import grid
from torch.utils.cpp_extension ... | 51,680 | 42.871817 | 153 | py |
Voxurf | Voxurf-main/lib/load_nerfpp.py | '''
Modify from
https://github.com/Kai-46/nerfplusplus/blob/master/data_loader_split.py
'''
import os
import glob
import scipy
import imageio
import numpy as np
import torch
########################################################################################################################
# camera coordinate syst... | 5,638 | 33.175758 | 120 | py |
Voxurf | Voxurf-main/lib/grid.py | import os
import time
import functools
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.cpp_extension import load
parent_dir = os.path.dirname(os.path.abspath(__file__))
render_utils_cuda = load(
name='render_utils_cuda',
sources=[
os.p... | 11,636 | 46.11336 | 134 | py |
Voxurf | Voxurf-main/lib/load_blender.py | import os
import torch
import numpy as np
import imageio
import json
import torch.nn.functional as F
import cv2
trans_t = lambda t : torch.Tensor([
[1,0,0,0],
[0,1,0,0],
[0,0,1,t],
[0,0,0,1]]).float()
rot_phi = lambda phi : torch.Tensor([
[1,0,0,0],
[0,np.cos(phi),-np.sin(phi),0],
[0,np.s... | 2,553 | 27.065934 | 115 | py |
Voxurf | Voxurf-main/lib/load_volsdf_bmvs.py | import torch
import torch.nn.functional as F
import cv2 as cv
import numpy as np
import os
from glob import glob
from icecream import ic
from scipy.spatial.transform import Rotation as Rot
from scipy.spatial.transform import Slerp
import imageio
# This function is borrowed from IDR: https://github.com/lioryariv/idr
de... | 2,879 | 34.121951 | 111 | py |
Dialogue-to-Video-Retrieval | Dialogue-to-Video-Retrieval-main/modeling.py | from collections import OrderedDict
from typing import Tuple, Union
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from torch.nn import CrossEntropyLoss
from transformers import CLIPProcessor, CLIPModel
from torch import Tensor
from dataclasses import dataclass
from typing import... | 29,954 | 40.146978 | 122 | py |
Dialogue-to-Video-Retrieval | Dialogue-to-Video-Retrieval-main/data_preprocess.py | from tqdm import tqdm
import json
import codecs
import requests
import pandas as pd
from transformers import BertTokenizer, AutoTokenizer
from os import listdir
from os.path import isfile, join
import torch
import numpy as np
import random
json_load = lambda x: json.load(codecs.open(x, 'r', encoding='utf-8'))
json_dum... | 13,897 | 32.570048 | 115 | py |
Dialogue-to-Video-Retrieval | Dialogue-to-Video-Retrieval-main/run_dialogue_to_video_retrieval.py | """ running training and evaluation code for dialogue-to-video retrieval
Created by Chenyang Lyu
"""
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data import TensorDataset
from tra... | 25,440 | 46.025878 | 180 | py |
PLRDiff | PLRDiff-main/test_single.py | import argparse
import os
import numpy as np
import torch as th
import torch.nn.functional as nF
from pathlib import Path
from guided_diffusion import utils
from guided_diffusion.create import create_model_and_diffusion_RS
import scipy.io as sio
from collections import OrderedDict
from os.path import join
from skimag... | 5,697 | 35.292994 | 146 | py |
PLRDiff | PLRDiff-main/guided_diffusion/core.py | '''
copied from
https://github.com/sanghyun-son/bicubic_pytorch
A standalone PyTorch implementation for fast and efficient bicubic resampling.
The resulting values are the same to MATLAB function imresize('bicubic').
## Author: Sanghyun Son
## Email: sonsang35@gmail.com (primary), thstkdgus35@snu.ac.kr (s... | 13,613 | 27.904459 | 84 | py |
PLRDiff | PLRDiff-main/guided_diffusion/rsfac_gaussian_diffusion.py | """
This code started out as a PyTorch port of the following:
https://github.com/HJ-harry/MCG_diffusion/blob/main/guided_diffusion/gaussian_diffusion.py
The conditions are changed and coefficient matrix estimation is added.
"""
import enum
import math
import numpy as np
import torch as th
from torch.autograd import ... | 16,226 | 34.900442 | 129 | py |
PLRDiff | PLRDiff-main/guided_diffusion/sr3_modules/unet.py | import math
import torch
from torch import nn
import torch.nn.functional as F
from inspect import isfunction
def exists(x):
return x is not None
def default(val, d):
if exists(val):
return val
return d() if isfunction(d) else d
# PositionalEncoding Source: https://github.com/lmnt-com/wavegrad/b... | 9,347 | 31.8 | 150 | py |
Defensive_Diffusion-testing | Defensive_Diffusion-testing/diffusion.py | import torch
import torchvision
from torch import nn
from torch.nn import functional as F
from torch.utils.data import DataLoader
from diffusers import DDPMScheduler, UNet2DModel
from matplotlib import pyplot as plt
from diffusers import DDIMScheduler, DDPMPipeline
from data.dataset import data_loader
import wandb
impo... | 4,562 | 32.551471 | 152 | py |
Defensive_Diffusion-testing | Defensive_Diffusion-testing/test.py | import argparse
import torch
from tqdm import tqdm
import os
from models import mlp
from data.dataset import data_loader
from data.dataset import data_loader_attacks
root_dir = "./data/attack-data/0.03"
def test_vit(model, dataloader_test):
"""
This function used to test ViT.
Args:
model: ViT... | 2,645 | 31.666667 | 94 | py |
Defensive_Diffusion-testing | Defensive_Diffusion-testing/aabb.py | import numpy as np
import torch
import torch.nn.functional as F
import torchvision
from diffusers import DDIMScheduler, DDPMPipeline
from matplotlib import pyplot as plt
from PIL import Image
from torchvision import transforms
from tqdm.auto import tqdm
from torchvision.datasets import ImageFolder
from torch.utils.dat... | 1,250 | 29.512195 | 93 | py |
Defensive_Diffusion-testing | Defensive_Diffusion-testing/attack.py |
import foolbox as fb
import torch
import torch.nn as nn
from autoattack import AutoAttack
class Attack():
"""
This class used to generate adversarial images.
when create object specify epsilon: float, attack_type: 'FGSM, CW, BIM, L2PGD, PGD, LinfBIM'.
generate method return images and success tens... | 7,000 | 34.358586 | 99 | py |
Defensive_Diffusion-testing | Defensive_Diffusion-testing/mlp.py | import torch.nn as nn
import torch
from utils import get_classifiers_list
class Classifier(nn.Module):
"""
MLP classifier.
Args:
num_classes -> number of classes
in_feature -> features dimension
return logits.
"""
def __init__(self,num_classes=2 ,in_features = 768*196... | 2,262 | 33.815385 | 152 | py |
Defensive_Diffusion-testing | Defensive_Diffusion-testing/utils.py | import os
import torch
from attack import Attack
import numpy as np
import matplotlib.pyplot as plt
from torchvision.utils import save_image
from autoattack import AutoAttack
def generate_save_attacks(attack_names, model, samples, classes ,attack_image_dir, epsilon = 0.03, batch_size=30):
"""
it saves att... | 6,033 | 37.433121 | 177 | py |
Defensive_Diffusion-testing | Defensive_Diffusion-testing/majority_voting.py | import os
import torch
import argparse
import numpy as np
from utils import *
from data.dataset import data_loader, data_loader_attacks
import mlp
def majority_voting(data_loader, model, mlps_list):
"""
SEViT performance with majority voting.
Args:
data_loader: loader of test samples for clean i... | 2,732 | 30.77907 | 122 | py |
Defensive_Diffusion-testing | Defensive_Diffusion-testing/finetuning_diffusion_model.py | import numpy as np
import torch
import torch.nn.functional as F
import torchvision
from diffusers import DDIMScheduler, DDPMPipeline
from matplotlib import pyplot as plt
from PIL import Image
from torchvision import transforms
from diffusers import DDPMScheduler, UNet2DModel
from tqdm.auto import tqdm
from torchvision.... | 4,351 | 30.766423 | 156 | py |
Defensive_Diffusion-testing | Defensive_Diffusion-testing/generate_attacks.py | import torch
import argparse
from attack import Attack
from utils import *
from data.dataset import data_loader
from mlp import Big_model
parser = argparse.ArgumentParser(description='Generate Attack from ViT')
parser.add_argument('--epsilons', type=float ,
help='Perturbations Size')
parser.add... | 1,179 | 28.5 | 72 | py |
Defensive_Diffusion-testing | Defensive_Diffusion-testing/adversarial_detection.py | import torch
import numpy as np
from utils import *
import argparse
from data.dataset import data_loader, data_loader_attacks
parser = argparse.ArgumentParser(description='ROC For Attack')
parser.add_argument('--clean_image_folder_path', type=str ,
help='Path to root directory of images')
parse... | 1,975 | 36.283019 | 132 | py |
Defensive_Diffusion-testing | Defensive_Diffusion-testing/models/mlp.py | import torch.nn as nn
import torch
class Classifier(nn.Module):
"""
MLP classifier.
Args:
num_classes -> number of classes
in_feature -> features dimension
return logits.
"""
def __init__(self,num_classes=2 ,in_features = 768*196):
super().__init__()
... | 911 | 30.448276 | 78 | py |
Defensive_Diffusion-testing | Defensive_Diffusion-testing/data/dataset.py | import os
from torchvision.datasets import ImageFolder
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
# DataLoader and Dataset (Clean Samples)
def data_loader( root_dir, image_size = (224,224), batch_size= 15, train_dir = 'training',test_dir = 'testing', vald_dir = 'validatio... | 4,070 | 36.694444 | 139 | py |
stylegan-encoder | stylegan-encoder-master/train_effnet.py | """
Trains a modified EfficientNet to generate approximate dlatents using examples from a trained StyleGAN.
Props to @SimJeg on GitHub for the original code this is based on, from this thread: https://github.com/Puzer/stylegan-encoder/issues/1#issuecomment-490469454
"""
import os
import math
import numpy as np
import p... | 16,451 | 47.674556 | 213 | py |
stylegan-encoder | stylegan-encoder-master/train_resnet.py | """
Trains a modified Resnet to generate approximate dlatents using examples from a trained StyleGAN.
Props to @SimJeg on GitHub for the original code this is based on, from this thread: https://github.com/Puzer/stylegan-encoder/issues/1#issuecomment-490469454
"""
import os
import math
import numpy as np
import pickle
... | 13,439 | 49.337079 | 289 | py |
stylegan-encoder | stylegan-encoder-master/align_images.py | import os
import sys
import bz2
import argparse
from keras.utils import get_file
from ffhq_dataset.face_alignment import image_align
from ffhq_dataset.landmarks_detector import LandmarksDetector
import multiprocessing
LANDMARKS_MODEL_URL = 'http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2'
def unpack_... | 3,050 | 48.209677 | 200 | py |
stylegan-encoder | stylegan-encoder-master/swa.py | """
Stochastic Weight Averaging: https://arxiv.org/abs/1803.05407
See: https://github.com/kristpapadopoulos/keras-stochastic-weight-averaging
"""
import os
import glob
import pickle
import argparse
from dnnlib.tflib import init_tf
filepath = 'output.pkl'
def fetch_models_from_files(model_list):
for fn in model_li... | 2,025 | 31.15873 | 139 | py |
stylegan-encoder | stylegan-encoder-master/encode_images.py | import os
import argparse
import pickle
from tqdm import tqdm
import PIL.Image
from PIL import ImageFilter
import numpy as np
import dnnlib
import dnnlib.tflib as tflib
import config
from encoder.generator_model import Generator
from encoder.perceptual_model import PerceptualModel, load_images
#from tensorflow.keras.mo... | 15,281 | 62.14876 | 211 | py |
stylegan-encoder | stylegan-encoder-master/encoder/perceptual_model.py | from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
#import tensorflow_probability as tfp
#tf.enable_eager_execution()
import os
import bz2
import PIL.Image
from PIL import ImageFilter
import numpy as np
from keras.models import Model
from keras.utils import get_f... | 15,587 | 49.775244 | 192 | py |
iglu-2021-builder-baseline-rllib | iglu-2021-builder-baseline-rllib-main/model.py | from typing import Sequence
import gym
import numpy as np
from ray.rllib.models.torch.misc import SlimFC
from ray.rllib.models.torch.modules.noisy_layer import NoisyLayer
from ray.rllib.agents.dqn.dqn_torch_model import DQNTorchModel
from ray.rllib.models.torch.torch_modelv2 import TorchModelV2
from ray.rllib.utils.fra... | 4,341 | 37.087719 | 101 | py |
iglu-2021-builder-baseline-rllib | iglu-2021-builder-baseline-rllib-main/custom_agent.py | import tensorflow as tf
import torch
import gym
from copy import deepcopy as copy
from gym import spaces
import ray
import numpy as np
from torch._C import Value
import yaml
from wrappers import FakeIglu
from train import build_env, register_models
from ray.rllib.agents.registry import get_trainer_class
CONFIG_FIL... | 2,851 | 35.101266 | 92 | py |
SPIGA | SPIGA-main/spiga/eval/results_gen.py | import pkg_resources
import json
import copy
import torch
import spiga.data.loaders.dl_config as dl_cfg
import spiga.data.loaders.dataloader as dl
import spiga.inference.pretreatment as pretreat
from spiga.inference.framework import SPIGAFramework
from spiga.inference.config import ModelConfig
def main():
import... | 3,346 | 37.034091 | 123 | py |
SPIGA | SPIGA-main/spiga/models/spiga.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import spiga.models.gnn.pose_proj as pproj
from spiga.models.cnn.cnn_multitask import MultitaskCNN
from spiga.models.gnn.step_regressor import StepRegressor, RelativePositionEncoder
class SPIGA(nn.Module):
def __init__(self, num_landmarks=98, num... | 6,704 | 37.982558 | 125 | py |
SPIGA | SPIGA-main/spiga/models/cnn/layers.py | from torch import nn
class Conv(nn.Module):
def __init__(self, inp_dim, out_dim, kernel_size=3, stride=1, bn=False, relu=True):
super(Conv, self).__init__()
self.inp_dim = inp_dim
self.conv = nn.Conv2d(inp_dim, out_dim, kernel_size, stride, padding=(kernel_size - 1) // 2, bias=False)
... | 2,619 | 31.75 | 112 | py |
SPIGA | SPIGA-main/spiga/models/cnn/coord_conv.py | import torch
import torch.nn as nn
class AddCoordsTh(nn.Module):
def __init__(self, x_dim=64, y_dim=64, with_r=False):
super(AddCoordsTh, self).__init__()
self.x_dim = x_dim
self.y_dim = y_dim
self.with_r = with_r
xx_channel, yy_channel = self._prepare_coords()
sel... | 2,053 | 33.813559 | 92 | py |
SPIGA | SPIGA-main/spiga/models/cnn/hourglass.py | import torch.nn as nn
from spiga.models.cnn.layers import Conv, Deconv, Residual
class Hourglass(nn.Module):
def __init__(self, n, f, bn=None, increase=0):
super(Hourglass, self).__init__()
nf = f + increase
self.up1 = Residual(f, f)
# Lower branch
self.pool1 = Conv(f, f, ... | 1,575 | 28.185185 | 75 | py |
SPIGA | SPIGA-main/spiga/models/cnn/transform_e2p.py | import torch
from torch import nn
class E2Ptransform(nn.Module):
"""Edge to Points trasnformation"""
def __init__(self, points, edges, out_dim=64):
super(E2Ptransform, self).__init__()
self.ones = nn.parameter.Parameter(torch.ones((1, out_dim, out_dim)), requires_grad=False)
edge_matri... | 16,877 | 64.418605 | 107 | py |
SPIGA | SPIGA-main/spiga/models/cnn/cnn_multitask.py | from torch import nn
from spiga.models.cnn.layers import Conv, Residual
from spiga.models.cnn.hourglass import HourglassCore
from spiga.models.cnn.coord_conv import AddCoordsTh
from spiga.models.cnn.transform_e2p import E2Ptransform
class MultitaskCNN(nn.Module):
def __init__(self, nstack=4, num_landmarks=98, nu... | 4,196 | 43.178947 | 139 | py |
SPIGA | SPIGA-main/spiga/models/gnn/step_regressor.py | import torch.nn as nn
from spiga.models.gnn.layers import MLP
from spiga.models.gnn.gat import GAT
class StepRegressor(nn.Module):
def __init__(self, input_dim: int, feature_dim: int, nstack=4, decoding=[256, 128, 64, 32]):
super(StepRegressor, self).__init__()
assert nstack > 0
self.nst... | 1,423 | 31.363636 | 96 | py |
SPIGA | SPIGA-main/spiga/models/gnn/layers.py | from torch import nn
def MLP(channels: list):
n = len(channels)
layers = []
for i in range(1, n):
layers.append(nn.Conv1d(channels[i - 1], channels[i], kernel_size=1, bias=True))
if i < (n-1):
layers.append(nn.BatchNorm1d(channels[i]))
layers.append(nn.ReLU())
r... | 349 | 25.923077 | 88 | py |
SPIGA | SPIGA-main/spiga/models/gnn/gat.py | from copy import deepcopy
import torch
from torch import nn
import torch.nn.functional as F
from spiga.models.gnn.layers import MLP
class GAT(nn.Module):
def __init__(self, input_dim: int, output_dim: int, num_heads=4):
super().__init__()
num_heads_in = num_heads
self.reshape = None
... | 2,284 | 35.269841 | 92 | py |
SPIGA | SPIGA-main/spiga/models/gnn/pose_proj.py | import torch
import math
def euler_to_rotation_matrix(euler):
# http://euclideanspace.com/maths/geometry/rotations/conversions/eulerToMatrix/index.htm
# Change coordinates system
euler[:, 0] = -(euler[:, 0]-90)
euler[:, 1] = -euler[:, 1]
euler[:, 2] = -(euler[:, 2]+90)
# Convert to radians
... | 2,046 | 25.24359 | 92 | py |
SPIGA | SPIGA-main/spiga/data/loaders/alignments.py | import os
import json
import cv2
import numpy as np
from PIL import Image
from torch.utils.data import Dataset
from torchvision import transforms
from spiga.data.loaders.transforms import get_transformers
class AlignmentsDataset(Dataset):
'''Loads datasets of images with landmarks and bounding boxes.
'''
... | 5,540 | 33.849057 | 118 | py |
SPIGA | SPIGA-main/spiga/data/loaders/dataloader.py | from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
import spiga.data.loaders.alignments as zoo_alignments
zoos = [zoo_alignments]
def get_dataset(data_config, pretreat=None, debug=False):
for zoo in zoos:
dataset = zoo.get_dataset(data_config, pretreat=p... | 1,360 | 31.404762 | 105 | py |
SPIGA | SPIGA-main/spiga/data/loaders/transforms.py | import cv2
import numpy as np
import torch
from spiga.data.loaders.augmentors.modern_posit import PositPose
from spiga.data.loaders.augmentors.heatmaps import Heatmaps
from spiga.data.loaders.augmentors.boundary import AddBoundary
from spiga.data.loaders.augmentors.landmarks import HorizontalFlipAug, RSTAug, Occlusion... | 3,558 | 40.870588 | 109 | py |
SPIGA | SPIGA-main/spiga/data/loaders/augmentors/landmarks.py | import random
import cv2
import numpy as np
from PIL import Image
from torchvision import transforms
# My libs
import spiga.data.loaders.augmentors.utils as dlu
class HorizontalFlipAug:
def __init__(self, ldm_flip_order, prob=0.5):
self.prob = prob
self.ldm_flip_order = ldm_flip_order
def __... | 11,374 | 35.931818 | 114 | py |
SPIGA | SPIGA-main/spiga/inference/pretreatment.py | from torchvision import transforms
import numpy as np
from PIL import Image
import cv2
from spiga.data.loaders.transforms import TargetCrop, ToOpencv, AddModel3D
def get_transformers(data_config):
transformer_seq = [
Opencv2Pil(),
TargetCrop(data_config.image_size, data_config.target_dist),
... | 825 | 24.8125 | 74 | py |
SPIGA | SPIGA-main/spiga/inference/framework.py | import os
import pkg_resources
import copy
import torch
import numpy as np
# Paths
weights_path_dft = pkg_resources.resource_filename('spiga', 'models/weights')
import spiga.inference.pretreatment as pretreat
from spiga.models.spiga import SPIGA
from spiga.inference.config import ModelConfig
class SPIGAFramework:
... | 5,368 | 37.905797 | 120 | py |
ReconVAT | ReconVAT-master/train_baseline_Thickstun.py | import os
from datetime import datetime
import pickle
import numpy as np
from sacred import Experiment
from sacred.commands import print_config, save_config
from sacred.observers import FileStorageObserver
from torch.optim.lr_scheduler import StepLR, CyclicLR
from torch.utils.data import DataLoader, ConcatDataset
fro... | 5,952 | 36.677215 | 142 | py |
ReconVAT | ReconVAT-master/evaluate.py | import numpy as np
from sacred import Experiment
from sacred.commands import print_config, save_config
from sacred.observers import FileStorageObserver
from torch.nn.utils import clip_grad_norm_
from torch.optim.lr_scheduler import StepLR
from torch.utils.data import DataLoader
from torch.utils.tensorboard import Summa... | 3,416 | 34.226804 | 117 | py |
ReconVAT | ReconVAT-master/train_UNet_Onset_VAT.py | import os
from datetime import datetime
import pickle
import numpy as np
from sacred import Experiment
from sacred.commands import print_config, save_config
from sacred.observers import FileStorageObserver
from torch.optim.lr_scheduler import StepLR, CyclicLR
from torch.utils.data import DataLoader
from tqdm import t... | 7,486 | 41.782857 | 219 | py |
ReconVAT | ReconVAT-master/transcribe_files.py | import pickle
import os
import numpy as np
from model import *
from sacred import Experiment
from sacred.commands import print_config, save_config
from sacred.observers import FileStorageObserver
ex = Experiment('transcription')
def transcribe2midi(data, model, model_type, onset_threshold=0.5, frame_threshold=0.5, s... | 2,933 | 36.615385 | 182 | py |
ReconVAT | ReconVAT-master/train_baseline_Multi_Inst.py | import os
from datetime import datetime
import pickle
import numpy as np
from sacred import Experiment
from sacred.commands import print_config, save_config
from sacred.observers import FileStorageObserver
from torch.optim.lr_scheduler import StepLR, CyclicLR
from torch.utils.data import DataLoader, ConcatDataset
fro... | 7,722 | 41.202186 | 230 | py |
ReconVAT | ReconVAT-master/train_baseline_Prestack.py | import os
from datetime import datetime
import pickle
import numpy as np
from sacred import Experiment
from sacred.commands import print_config, save_config
from sacred.observers import FileStorageObserver
from torch.optim.lr_scheduler import StepLR, CyclicLR
from torch.utils.data import DataLoader, ConcatDataset
fro... | 6,274 | 37.030303 | 142 | py |
ReconVAT | ReconVAT-master/train_baseline_onset_frame_VAT.py | import os
from datetime import datetime
import pickle
import numpy as np
from sacred import Experiment
from sacred.commands import print_config, save_config
from sacred.observers import FileStorageObserver
from torch.optim.lr_scheduler import StepLR
from torch.utils.data import DataLoader, ConcatDataset
from tqdm imp... | 8,271 | 45.47191 | 136 | py |
ReconVAT | ReconVAT-master/train_UNet_VAT.py | import os
from datetime import datetime
import pickle
import numpy as np
from sacred import Experiment
from sacred.commands import print_config, save_config
from sacred.observers import FileStorageObserver
from torch.optim.lr_scheduler import StepLR, CyclicLR
from torch.utils.data import DataLoader, ConcatDataset
fro... | 8,580 | 43.926702 | 213 | py |
ReconVAT | ReconVAT-master/model/self_attention_VAT.py | """
A rough translation of Magenta's Onsets and Frames implementation [1].
[1] https://github.com/tensorflow/magenta/blob/master/magenta/models/onsets_frames_transcription/model.py
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from nnAudio import Spectrogram
fr... | 56,736 | 41.788084 | 196 | py |
ReconVAT | ReconVAT-master/model/VAT.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from nnAudio import Spectrogram
from .constants import *
from model.utils import Normalization
class stepwise_VAT(nn.Module):
"""
We define a function of regularization, specifically VAT.
"""
def __init__(s... | 1,478 | 32.613636 | 88 | py |
ReconVAT | ReconVAT-master/model/Unet_prestack.py | import torch
from torch.nn.functional import conv1d, mse_loss
import torch.nn.functional as F
import torch.nn as nn
from nnAudio import Spectrogram
from .constants import *
from model.utils import Normalization
batchNorm_momentum = 0.1
num_instruments = 1
class block(nn.Module):
def __init__(self, inp, out, ksiz... | 7,503 | 41.636364 | 124 | py |
ReconVAT | ReconVAT-master/model/onset_frame_VAT.py | """
A rough translation of Magenta's Onsets and Frames implementation [1].
[1] https://github.com/tensorflow/magenta/blob/master/magenta/models/onsets_frames_transcription/model.py
"""
import torch
import torch.nn.functional as F
from torch import nn
from nnAudio import Spectrogram
from .constants import *
from mo... | 29,455 | 39.685083 | 150 | py |
ReconVAT | ReconVAT-master/model/constants.py | import torch
SAMPLE_RATE = 16000
HOP_LENGTH = SAMPLE_RATE * 32 // 1000
ONSET_LENGTH = SAMPLE_RATE * 32 // 1000
OFFSET_LENGTH = SAMPLE_RATE * 32 // 1000
HOPS_IN_ONSET = ONSET_LENGTH // HOP_LENGTH
HOPS_IN_OFFSET = OFFSET_LENGTH // HOP_LENGTH
MIN_MIDI = 21
MAX_MIDI = 108
N_BINS = 229 # Default using Mel spectrograms
ME... | 570 | 20.961538 | 64 | py |
ReconVAT | ReconVAT-master/model/Thickstun_model.py | import torch
from torch.nn.functional import conv1d, mse_loss
import torch.nn.functional as F
import torch.nn as nn
from nnAudio import Spectrogram
from .constants import *
from model.utils import Normalization
class Thickstun(torch.nn.Module):
def __init__(self):
super(Thickstun, self).__init__()
... | 3,069 | 41.054795 | 122 | py |
ReconVAT | ReconVAT-master/model/helper_functions.py | import os
from model.dataset import *
from model.evaluate_functions import evaluate_wo_velocity
import torch
from torch.utils.tensorboard import SummaryWriter
from torch.nn.utils import clip_grad_norm_
import numpy as np
# Mac users need to uncomment these two lines
import matplotlib
matplotlib.use('TkAgg')
import mat... | 31,908 | 45.44687 | 155 | py |
ReconVAT | ReconVAT-master/model/self_attention.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
class MutliHeadAttention1D(nn.Module):
def __init__(self, in_features, out_features, kernel_size, stride=1, groups=1, position=True, bias=False):
"""kernel_size is the 1D local attention window size"""
... | 3,357 | 39.95122 | 135 | py |
ReconVAT | ReconVAT-master/model/utils.py | import sys
from functools import reduce
import torch
from PIL import Image
from torch.nn.modules.module import _addindent
def cycle(iterable):
while True:
for item in iterable:
yield item
def summary(model, file=sys.stdout):
def repr(model):
# We treat the extra repr like the su... | 3,934 | 35.775701 | 247 | py |
ReconVAT | ReconVAT-master/model/dataset.py | import json
import os
from abc import abstractmethod
from glob import glob
import sys
import pickle
import pandas as pd
import numpy as np
import soundfile
from torch.utils.data import Dataset
from tqdm import tqdm
from .constants import *
from .midi import parse_midi
class PianoRollAudioDataset(Dataset):
def ... | 22,968 | 40.914234 | 140 | py |
ReconVAT | ReconVAT-master/model/self_attenttion_model.py | """
A rough translation of Magenta's Onsets and Frames implementation [1].
[1] https://github.com/tensorflow/magenta/blob/master/magenta/models/onsets_frames_transcription/model.py
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from nnAudio import Spectrogram
fr... | 29,286 | 40.958453 | 211 | py |
ReconVAT | ReconVAT-master/model/decoding.py | import numpy as np
import torch
def extract_notes_wo_velocity(onsets, frames, onset_threshold=0.5, frame_threshold=0.5, rule='rule1'):
"""
Finds the note timings based on the onsets and frames information
Parameters
----------
onsets: torch.FloatTensor, shape = [frames, bins]
frames: torch.Flo... | 4,479 | 33.198473 | 134 | py |
ReconVAT | ReconVAT-master/model/UNet_onset.py | """
A rough translation of Magenta's Onsets and Frames implementation [1].
[1] https://github.com/tensorflow/magenta/blob/master/magenta/models/onsets_frames_transcription/model.py
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from nnAudio import Spectrogram
fr... | 25,563 | 45.144404 | 196 | py |
ReconVAT | ReconVAT-master/model/Spectrogram.py | """
Module containing all the spectrogram classes
"""
# 0.2.0
import torch
import torch.nn as nn
from torch.nn.functional import conv1d, conv2d, fold
import scipy # used only in CFP
import numpy as np
from time import time
from nnAudio.librosa_functions import *
from nnAudio.utils import *
sz_float = 4 # size... | 96,009 | 41.976723 | 401 | py |
ReconVAT | ReconVAT-master/model/Segmentation.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
import torch.nn.init as init
import numpy as np
from nnAudio import Spectrogram
from .constants import *
from model.utils import Normalization
def _l2_normalize(d, binwise):
# input shape (batch, timesteps, bins, ?)... | 25,776 | 39.15109 | 156 | py |
DFMGAN | DFMGAN-main/legacy.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and re... | 16,502 | 50.411215 | 154 | py |
DFMGAN | DFMGAN-main/style_mixing.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and rel... | 4,891 | 40.109244 | 132 | py |
DFMGAN | DFMGAN-main/projector.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and rel... | 8,990 | 41.211268 | 136 | py |
DFMGAN | DFMGAN-main/generate.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and rel... | 10,000 | 45.300926 | 150 | py |
DFMGAN | DFMGAN-main/gen_gif_dfmgan.py | """Generate GIF using pretrained network pickle."""
import os
import click
import dnnlib
import numpy as np
from PIL import Image
import torch
import legacy
#----------------------------------------------------------------------------
@click.command()
@click.option('--network', 'network_pkl', help='Network pickle ... | 5,711 | 41.947368 | 170 | py |
DFMGAN | DFMGAN-main/generate_gif.py | """Generate GIF using pretrained network pickle."""
import os
import click
import dnnlib
import numpy as np
from PIL import Image
import torch
import legacy
#----------------------------------------------------------------------------
@click.command()
@click.option('--network', 'network_pkl', help='Network pickle ... | 5,303 | 43.571429 | 196 | py |
DFMGAN | DFMGAN-main/train.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and rel... | 29,176 | 44.095827 | 192 | py |
DFMGAN | DFMGAN-main/calc_metrics.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and rel... | 9,992 | 43.413333 | 182 | py |
DFMGAN | DFMGAN-main/training/loss.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and re... | 18,203 | 56.974522 | 266 | py |
DFMGAN | DFMGAN-main/training/augment.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and re... | 26,373 | 60.050926 | 366 | py |
DFMGAN | DFMGAN-main/training/dataset.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and re... | 8,683 | 35.334728 | 159 | py |
DFMGAN | DFMGAN-main/training/networks.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and re... | 49,430 | 50.544317 | 199 | py |
DFMGAN | DFMGAN-main/training/training_loop.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and re... | 31,481 | 53.27931 | 184 | py |