| repo (string, lengths 1–99) | file (string, lengths 13–215) | code (string, lengths 12–59.2M) | file_length (int64, 12–59.2M) | avg_line_length (float64, 3.82–1.48M) | max_line_length (int64, 12–2.51M) | extension_type (stringclasses, 1 value) |
|---|---|---|---|---|---|---|
trf-sg2im | trf-sg2im-main/modules/pos_enc.py | import math
import dgl
import numpy as np
import torch
from scipy import sparse as sp
from torch import nn
class SinePositionalEncoding(nn.Module):
def __init__(self, emb_size, dropout=0.1, max_len=10):
super(SinePositionalEncoding, self).__init__()
self.dropout = nn.Dropout(p=dropout)
s... | 1,767 | 30.571429 | 75 | py |
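The `pos_enc.py` cell above is truncated mid-definition. For reference, a minimal sketch of the standard sine/cosine positional encoding that a module with this signature typically computes (the `pe` buffer name and the assumption of an even `emb_size` are editorial, not recovered from the cell):

```python
import math
import torch
from torch import nn

class SinePositionalEncoding(nn.Module):
    """Fixed sine/cosine positional encoding (Vaswani et al., 2017)."""
    def __init__(self, emb_size, dropout=0.1, max_len=10):
        super().__init__()
        self.dropout = nn.Dropout(p=dropout)
        position = torch.arange(max_len).unsqueeze(1)                 # (max_len, 1)
        div_term = torch.exp(torch.arange(0, emb_size, 2) * (-math.log(10000.0) / emb_size))
        pe = torch.zeros(max_len, emb_size)                           # assumes even emb_size
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        self.register_buffer("pe", pe.unsqueeze(0))                   # (1, max_len, emb_size)

    def forward(self, x):
        # x: (batch, seq_len, emb_size); add the encoding for the first seq_len positions.
        return self.dropout(x + self.pe[:, : x.size(1)])
```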
trf-sg2im | trf-sg2im-main/modules/blocks.py | # Code borrowed from https://github.com/CompVis/taming-transformers/blob/master/taming/modules/diffusionmodules/model.py
import numpy as np
import torch
import torch.nn as nn
def nonlinearity(x):
# swish
return x*torch.sigmoid(x)
def Normalize(in_channels):
return torch.nn.GroupNorm(num_groups=32, num_ch... | 13,898 | 36.666667 | 122 | py |
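`nonlinearity` in the `blocks.py` cell is the swish (SiLU) activation, x·σ(x), and `Normalize` wraps 32-group GroupNorm. A quick self-contained check that the hand-rolled swish matches PyTorch's built-in:

```python
import torch
import torch.nn.functional as F

def nonlinearity(x):
    # swish / SiLU: x * sigmoid(x)
    return x * torch.sigmoid(x)

x = torch.randn(4, 8)
assert torch.allclose(nonlinearity(x), F.silu(x))

# Normalize(in_channels) from the cell is GroupNorm with 32 groups:
norm = torch.nn.GroupNorm(num_groups=32, num_channels=64)
```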
trf-sg2im | trf-sg2im-main/modules/vqvae/modules.py | from math import log2, sqrt
import torch.nn.functional as F
from einops import rearrange
from torch import einsum, nn
from .functions import vq, vq_st
def to_scalar(arr):
if type(arr) == list:
return [x.item() for x in arr]
else:
return arr.item()
def weights_init(m):
classname = m.__c... | 6,166 | 27.419355 | 71 | py |
trf-sg2im | trf-sg2im-main/modules/vqvae/functions.py | import torch
from einops import rearrange
from torch.autograd import Function
class VectorQuantization(Function):
@staticmethod
def forward(ctx, inputs, codebook):
with torch.no_grad():
b, h, w, _ = inputs.size()
inputs_flatten = rearrange(inputs, 'b h w e -> (b h w) e')
... | 2,629 | 35.527778 | 92 | py |
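The `VectorQuantization.forward` preview flattens `(b, h, w, e)` inputs and is cut off before the nearest-codebook lookup. A minimal sketch of that lookup using the squared-distance expansion ||x − c||² = ||x||² − 2x·c + ||c||² (the function name and codebook size here are illustrative):

```python
import torch
from einops import rearrange

def vq_indices(inputs, codebook):
    """inputs: (b, h, w, e); codebook: (k, e) -> integer codes (b, h, w)."""
    b, h, w, e = inputs.size()
    flat = rearrange(inputs, "b h w e -> (b h w) e")
    distances = (
        flat.pow(2).sum(1, keepdim=True)      # ||x||^2, (bhw, 1)
        - 2 * flat @ codebook.t()             # -2 x.c,  (bhw, k)
        + codebook.pow(2).sum(1)              # ||c||^2, (k,) broadcast
    )
    return distances.argmin(dim=1).view(b, h, w)

codebook = torch.randn(512, 64)
idx = vq_indices(torch.randn(2, 8, 8, 64), codebook)  # (2, 8, 8)
quantized = codebook[idx]                             # (2, 8, 8, 64)
```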
trf-sg2im | trf-sg2im-main/modules/vqvae/vqgan.py | # Code borrowed from https://github.com/CompVis/taming-transformers/blob/master/taming/models/vqgan.py
import torch
import torch.nn.functional as F
from torch import nn
from modules.blocks import Decoder, Encoder
from modules.vqvae.quantize import VectorQuantizer2 as VectorQuantizer
class VQModel(nn.Module):
de... | 3,600 | 33.961165 | 102 | py |
trf-sg2im | trf-sg2im-main/modules/vqvae/quantize.py | # Code from https://github.com/CompVis/taming-transformers/blob/master/taming/modules/vqvae/quantize.py
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
from torch import einsum
class VectorQuantizer(nn.Module):
"""
see https://github.com/Mish... | 18,589 | 38.978495 | 110 | py |
trf-sg2im | trf-sg2im-main/data_modules/base.py | import os
from pathlib import Path
from torch.utils.data import DataLoader
from torch.utils.data.sampler import RandomSampler
from utils.visualize import *
from data_modules.loader import *
class BaseDataModule:
def __init__(self, data_dir="data"):
self.data_dir = Path(data_dir)
def train_dataloade... | 1,473 | 46.548387 | 123 | py |
trf-sg2im | trf-sg2im-main/data_modules/vg.py | import json
import os
import random
from functools import partial
from pathlib import Path
import h5py
import numpy as np
import PIL
import torch
import torchvision.transforms as T
from torch.utils.data import Dataset
from utils.data import *
from utils.sg2im.utils import Resize
from data_modules.base import BaseData... | 10,264 | 36.327273 | 79 | py |
trf-sg2im | trf-sg2im-main/data_modules/clevr.py | # Implementation from https://github.com/roeiherz/CanonicalSg2Im
import collections
import json
import os
import pickle
from functools import partial
import PIL
from einops import rearrange
from torch.utils.data import Dataset
from utils.data import *
from data_modules.base import BaseDataModule
from data_modules.loa... | 15,759 | 34.257271 | 128 | py |
trf-sg2im | trf-sg2im-main/data_modules/coco.py | import json
import math
import os
import random
from collections import defaultdict
from functools import partial
from pathlib import Path
import cv2
import numpy as np
import PIL
import pycocotools.mask as mask_utils
import torch
import torchvision.transforms as T
from skimage.transform import resize as imresize
from... | 27,183 | 40.375951 | 128 | py |
StyleFool | StyleFool-main/attack_prepare.py | import os
import numpy as np
import torch
import argparse
from model_init import model_initial
from generate_attack import target_attack, untarget_attack
parser = argparse.ArgumentParser(description='StyleFool_attack_prepare')
parser.add_argument('--model', type=str, default='C3D', choices=['C3D', 'I3D'], help='the at... | 2,389 | 41.678571 | 124 | py |
StyleFool | StyleFool-main/prepare.py | import os
import torch
import argparse
from utils.utils import calculate_color, select_style_target, select_style_untarget, calculate_superposition
from utils.preprocess import generate_styles
from model_init import model_initial
parser = argparse.ArgumentParser(description='StyleFool_prepare')
parser.add_argument('--... | 3,332 | 46.614286 | 151 | py |
StyleFool | StyleFool-main/pytorch_i3d.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import os
import sys
from collections import OrderedDict
class MaxPool3dSamePadding(nn.MaxPool3d):
def compute_pad(self, dim, s):
if s % self.stride[dim] == 0:
return m... | 13,567 | 40.619632 | 145 | py |
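`MaxPool3dSamePadding.compute_pad` is cut off after its first branch; it implements TensorFlow-style 'SAME' padding so that `out = ceil(in / stride)`. A standalone sketch of the same formula (the closed form below is equivalent to the two-branch version the preview starts):

```python
import math

def compute_same_pad(size, kernel, stride):
    """Total padding along one axis for TF-style 'SAME' pooling."""
    out = math.ceil(size / stride)
    return max((out - 1) * stride + kernel - size, 0)

assert compute_same_pad(8, 3, 2) == 1   # size divides evenly: kernel - stride
assert compute_same_pad(7, 3, 2) == 2   # otherwise: kernel - (size % stride)
```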
StyleFool | StyleFool-main/model_init.py | import torch
from models import C3D
from model_wrapper.vid_model_top_k import I3D_K_Model, C3D_K_Model
from pytorch_i3d import InceptionI3d
def model_initial(model, dataset, device):
if model == 'C3D' and dataset == 'UCF101':
model = C3D(num_classes=101, pretrained=False).cuda().to(device)
checkpo... | 1,482 | 37.025641 | 72 | py |
StyleFool | StyleFool-main/models.py | import torch
import torch.nn as nn
# C3D Model
class C3D(nn.Module):
def __init__(self, num_classes, pretrained=False):
super(C3D, self).__init__()
self.conv1 = nn.Conv3d(3, 64, kernel_size=(3, 3, 3), padding=(1, 1, 1))
self.pool1 = nn.MaxPool3d(kernel_size=(1, 2, 2), stride=(1, 2, 2))
... | 4,493 | 40.229358 | 93 | py |
StyleFool | StyleFool-main/attacking.py | import logging
import os
import sys
import numpy as np
import torch
from attack.video_attack import targeted_video_attack, untargeted_video_attack
from model_wrapper.vid_model_top_k import C3D_K_Model
from utils.args_attack import video_attack_args_parse
from models import C3D
def main():
args = video_attack_args_... | 2,940 | 41.014286 | 104 | py |
StyleFool | StyleFool-main/generate_attack.py | import os
import numpy as np
import torch
def target_attack(model, class_info, npy_path, styled_npy_path, output_npy_path, gpu=0):
npy_path_class = sorted(os.listdir(npy_path))
npy_info = {}
for subdir in npy_path_class:
sub_path = npy_path + subdir + "/"
sub_sub_npy = sorted(os.listdir(su... | 3,165 | 46.253731 | 115 | py |
StyleFool | StyleFool-main/attack/video_attack.py | import collections
import logging
import numpy as np
import torch
def apply_NES(model, vid, n, sigma, target_class, rank_transform, sub_num, untargeted):
with torch.no_grad():
grads = torch.zeros(vid.size(), device='cuda')
count_in = 0
loss_total = 0
logging.info('sampling....')
... | 10,010 | 41.6 | 129 | py |
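`apply_NES` estimates gradients of a black-box video model by sampling, per NES (natural evolution strategies). A minimal sketch of the core estimator with antithetic sampling (`loss_fn`, `n`, and `sigma` are illustrative stand-ins for the preview's model query and arguments):

```python
import torch

def nes_gradient(loss_fn, x, n=50, sigma=1e-3):
    """Antithetic-sampling NES estimate of d loss / d x for a black-box scalar loss."""
    grad = torch.zeros_like(x)
    for _ in range(n):
        u = torch.randn_like(x)
        # The +u / -u pair shares one noise draw, which lowers variance.
        grad += (loss_fn(x + sigma * u) - loss_fn(x - sigma * u)) * u
    return grad / (2 * n * sigma)

# Sanity check against the analytic gradient of ||x||^2, which is 2x.
x = torch.randn(8)
g = nes_gradient(lambda v: (v ** 2).sum(), x, n=2000, sigma=1e-2)
print((g - 2 * x).abs().max())  # small estimation error
```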
StyleFool | StyleFool-main/model_wrapper/vid_model_top_k.py | import torch
import torch.nn as nn
import numpy as np
class InceptionI3D_K_Model():
def __init__(self, model):
self.k = 1
self.model = model
def set_k(self, k):
self.k = k
def preprocess(self, vid):
vid_t = vid.clone()
vid_t.mul_(2).sub_(1)
vid_t = vid_t.pe... | 3,262 | 30.07619 | 93 | py |
StyleFool | StyleFool-main/utils/utils.py | import random
import math
import os
import numpy as np
import torch.nn as nn
import torch
import json
import cv2
from utils.color import *
R = 100
angle = 30
h0 = R * math.cos(angle / 180 * math.pi)
r0 = R * math.sin(angle / 180 * math.pi)
def video_to_images(path, crop_size=112):
video = cv2.VideoCapture(path)
... | 17,477 | 39.932084 | 120 | py |
f3 | f3-master/docs/conf.py | # -*- coding: utf-8 -*-
#
# f3 documentation build configuration file, created by
# sphinx-quickstart on Wed Jun 7 17:21:09 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All co... | 5,903 | 29.590674 | 144 | py |
Deep-Metric-Learning-CVPR16 | Deep-Metric-Learning-CVPR16-master/code/compute_googlenet_distance_matrix_cuda_embeddings_liftedstructsim_softmax_pair_m128.py | #!/usr/bin/python
# coding: utf-8
import numpy as np
import matplotlib.pyplot as plt
import caffe
import scipy.io as io
import sys
assert len(sys.argv)==3, "Incorrect no. of inputs. Provide embedding dimension and baselr."
embedding_dimension = int(sys.argv[1])
baselr = sys.argv[2]
print 'Embedding dim: %d' % embed... | 1,786 | 27.365079 | 170 | py |
COVID-19-forecasting | COVID-19-forecasting-master/COVID-19/experiment.py | import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # Reduce tensorflow messages.
import logging
import tensorflow as tf
import numpy as np
from sklearn.metrics import mean_squared_error, mean_absolute_error
import model
class Experiment():
def __init__(self, val_scalers, test_scalers):
self.logger = loggi... | 7,929 | 48.874214 | 129 | py |
COVID-19-forecasting | COVID-19-forecasting-master/COVID-19/model.py | import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
class EncoderBlock(layers.Layer):
"""
Encoder block that takes as input a time series and a numerial representation of a county name
and creates a learned representation to be processed further in the model.
"""
... | 2,568 | 40.435484 | 122 | py |
XVFI | XVFI-main/main.py | import argparse, os, shutil, time, random, torch, cv2, datetime, torch.utils.data, math
import torch.backends.cudnn as cudnn
import torch.optim as optim
import numpy as np
from torch.autograd import Variable
from utils import *
from XVFInet import *
from collections import Counter
def parse_args():
desc = "PyTor... | 21,352 | 50.083732 | 253 | py |
XVFI | XVFI-main/XVFInet.py | import functools, random
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
class XVFInet(nn.Module):
def __init__(self, args):
super(XVFInet, self).__init__()
self.args = args
self.device = torch.device('cuda:' + str(args.gpu) if torch.c... | 17,194 | 33.597586 | 222 | py |
XVFI | XVFI-main/utils.py | from __future__ import division
import os, glob, sys, torch, shutil, random, math, time, cv2
import numpy as np
import torch.utils.data as data
import torch.nn as nn
import pandas as pd
import torch.nn.functional as F
from datetime import datetime
from torch.nn import init
from skimage.measure import compare_ssim
from ... | 40,588 | 41.236212 | 162 | py |
image2reverb | image2reverb-main/test.py | import os
import json
import argparse
import numpy
import torch
import seaborn
import soundfile
import matplotlib
from pytorch_lightning import Trainer, loggers
from image2reverb.model import Image2Reverb
from image2reverb.dataset import Image2ReverbDataset
from matplotlib import pyplot
def main():
parser = argpa... | 3,856 | 42.829545 | 163 | py |
image2reverb | image2reverb-main/train.py | import os
import argparse
import torch
from pytorch_lightning import Trainer, loggers
from pytorch_lightning.callbacks import ModelCheckpoint
from image2reverb.model import Image2Reverb
from image2reverb.dataset import Image2ReverbDataset
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--n... | 3,557 | 42.925926 | 163 | py |
image2reverb | image2reverb-main/test_nn.py | import os
import shutil
import json
import argparse
import numpy
import tqdm
import sklearn
import torch
import seaborn
import soundfile
import matplotlib
from image2reverb.model import Image2Reverb
from image2reverb.dataset import Image2ReverbDataset
from matplotlib import pyplot
def main():
parser = argparse.Ar... | 4,617 | 33.721805 | 163 | py |
image2reverb | image2reverb-main/scripts/training_distribution.py | import sys
import os
import torch
import torchvision.models as models
import torchvision.transforms as transforms
from PIL import Image
def main():
m = sys.argv[1]
image_dir = sys.argv[2]
categories = sys.argv[3]
places = ["Large Hall", "Studio", "Medium Hall", "Outdoor", "Small Space", "Home Entryway... | 1,591 | 32.87234 | 152 | py |
image2reverb | image2reverb-main/scripts/process_img.py | import os
import csv
import tensorflow as tf
import PIL
import PIL.Image
import pathlib
import matplotlib.pyplot as plt
path_data = './dataset'
path_std_data = './standardized_data'
directories = os.listdir(path_data)
img_height = 256
img_width = 256
# Logs all of the data set into the .csv
with open('data_log.csv'... | 1,751 | 34.755102 | 136 | py |
image2reverb | image2reverb-main/scripts/make_img_with_depth.py | import sys
sys.path.append("../")
import os
import argparse
import numpy
import torch
import matplotlib
from matplotlib import pyplot
from model.data_loader import CreateDataLoader
from model.networks import Encoder
def main():
args = argparse.ArgumentParser().parse_args()
args.resize_or_crop = "scale_width_a... | 1,774 | 26.307692 | 131 | py |
image2reverb | image2reverb-main/scripts/interpretation/compare_models.py | import sys
import torch
import torchvision
def main():
m1, m2 = map(load_model, sys.argv[1:3])
d = compare_models(m1, m2)
print("\n%d differences in total." % d)
def load_model(model_path):
model = torchvision.models.resnet50(num_classes=365)
c = torch.load(model_path, map_location="cpu")
st... | 1,099 | 27.205128 | 142 | py |
image2reverb | image2reverb-main/scripts/interpretation/gradcam.py | import os
import argparse
import cv2
import numpy as np
import torch
from torch.autograd import Function
from torchvision import models
class FeatureExtractor():
""" Class for extracting activations and
registering gradients from targeted intermediate layers """
def __init__(self, model, target_layers):... | 9,720 | 33.594306 | 142 | py |
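`FeatureExtractor` in the gradcam preview saves intermediate activations and their gradients, the two ingredients of Grad-CAM. A hook-based sketch of the same idea (the model and layer choice are illustrative; the repo's own class iterates named submodules instead):

```python
import torch
import torchvision

class FeatureHook:
    """Capture one layer's forward activations and backward gradients."""
    def __init__(self, layer):
        self.activation, self.gradient = None, None
        layer.register_forward_hook(lambda m, i, o: setattr(self, "activation", o.detach()))
        layer.register_full_backward_hook(lambda m, gi, go: setattr(self, "gradient", go[0].detach()))

model = torchvision.models.resnet18(weights=None)
hook = FeatureHook(model.layer4)
scores = model(torch.randn(1, 3, 224, 224))
scores[0, scores.argmax()].backward()
# Grad-CAM: weight the activations by globally pooled gradients, then ReLU.
weights = hook.gradient.mean(dim=(2, 3), keepdim=True)    # (1, 512, 1, 1)
cam = torch.relu((weights * hook.activation).sum(dim=1))  # (1, 7, 7)
```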
image2reverb | image2reverb-main/image2reverb/stft.py | import numpy
import torch
import librosa
class STFT(torch.nn.Module):
def __init__(self):
super().__init__()
self._eps = 1e-8
def transform(self, audio):
m = numpy.abs(librosa.stft(audio/numpy.abs(audio).max(), 1024, 256))[:-1,:]
m = numpy.log(m + self._eps)
m = (((m -... | 854 | 34.625 | 114 | py |
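The `STFT.transform` cell calls `librosa.stft` with old positional arguments (`n_fft=1024`, `hop_length=256`) and is truncated mid min-max scaling. An equivalent log-magnitude front end written against the current keyword-only librosa API:

```python
import numpy as np
import librosa

def log_magnitude(audio, n_fft=1024, hop_length=256, eps=1e-8):
    audio = audio / np.abs(audio).max()      # peak-normalize
    m = np.abs(librosa.stft(audio, n_fft=n_fft, hop_length=hop_length))[:-1, :]
    return np.log(m + eps)                   # drop the Nyquist bin, log-compress

spec = log_magnitude(np.random.randn(22050))  # (512, frames)
```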
image2reverb | image2reverb-main/image2reverb/model.py | import os
import json
import numpy
import torch
from torch import nn
import torch.nn.functional as F
import pytorch_lightning as pl
import torchvision
import pyroomacoustics
from .networks import Encoder, Generator, Discriminator
from .stft import STFT
from .mel import LogMel
from .util import compare_t60
# Hyperpara... | 9,123 | 42.241706 | 194 | py |
image2reverb | image2reverb-main/image2reverb/dataset.py | import os
import soundfile
import torchvision.transforms as transforms
from torch.utils.data import Dataset
from PIL import Image
from .stft import STFT
from .mel import LogMel
F_EXTENSIONS = [
".jpg", ".JPG", ".jpeg", ".JPEG",
".png", ".PNG", ".ppm", ".PPM", ".bmp", ".BMP", ".tiff", ".wav", ".WAV", ".aif", "... | 1,975 | 27.637681 | 111 | py |
image2reverb | image2reverb-main/image2reverb/networks.py | import os
import numpy
import torch
import torch.nn as nn
import torchvision.models as models
import torch.utils.model_zoo as model_zoo
from collections import OrderedDict
from .layers import PixelWiseNormLayer, MiniBatchAverageLayer, EqualizedLearningRateLayer, Conv3x3, ConvBlock, upsample
class Encoder(nn.Module):
... | 16,499 | 46.826087 | 208 | py |
image2reverb | image2reverb-main/image2reverb/layers.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.init import kaiming_normal_, calculate_gain
class PixelWiseNormLayer(nn.Module):
"""PixelNorm layer. Implementation is from https://github.com/shanexn/pytorch-pggan."""
def __init__(self):
super().__init__()
def forw... | 2,948 | 32.134831 | 137 | py |
image2reverb | image2reverb-main/image2reverb/util.py | import os
import math
import numpy
import torch
import torch.fft
from PIL import Image
def compare_t60(a, b, sr=86):
try:
a = a.detach().clone().abs()
b = b.detach().clone().abs()
a = (a - a.min())/(a.max() - a.min())
b = (b - b.min())/(b.max() - b.min())
t_a = estimate_t60... | 5,855 | 33.857143 | 132 | py |
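`compare_t60` normalizes two magnitude tensors and compares an `estimate_t60` that the cell cuts off. A common way to estimate T60 is Schroeder backward integration; a sketch under that assumption (the repo's own estimator may differ):

```python
import numpy as np

def estimate_t60(ir, sr):
    """RT60 via Schroeder backward integration of an impulse response."""
    edc = np.cumsum((ir.astype(np.float64) ** 2)[::-1])[::-1]   # energy decay curve
    edc_db = 10.0 * np.log10(edc / edc[0] + 1e-12)
    i5, i25 = np.argmax(edc_db <= -5.0), np.argmax(edc_db <= -25.0)
    t = np.arange(len(ir)) / sr
    slope, _ = np.polyfit(t[i5:i25], edc_db[i5:i25], 1)         # dB per second
    return -60.0 / slope

# Synthetic exponential decay with a known T60 of 0.5 s.
sr = 16000
t = np.arange(sr) / sr
ir = np.random.randn(sr) * np.exp(-6.9078 * t / 0.5)
print(estimate_t60(ir, sr))  # ~0.5
```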
image2reverb | image2reverb-main/image2reverb/mel.py | import numpy
import torch
import librosa
class LogMel(torch.nn.Module):
def __init__(self):
super().__init__()
self._eps = 1e-8
def transform(self, audio):
m = librosa.feature.melspectrogram(audio/numpy.abs(audio).max())
m = numpy.log(m + self._eps)
return torch.Tensor... | 651 | 30.047619 | 104 | py |
fusion-dance | fusion-dance-main/vq_vae.py | """
Primary code to train the Pixel VQ-VAE.
"""
import os
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import pytorch_msssim
from tqdm import tqdm
import utils.data as data
import utils.graphics as graphics
import utils.loss as loss
from models import vqvae
seed = 42
np.rando... | 11,818 | 31.29235 | 91 | py |
fusion-dance | fusion-dance-main/baseline_conditional_gated_pixelcnn.py | import os
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
from tqdm import tqdm
import utils.data as data
import utils.graphics as graphics
import utils.loss as loss
from models import vqvae, gated_pixelcnn
seed = 42
np.random.seed(seed)
_ = torch.manual_seed(seed)
############... | 9,243 | 32.132616 | 108 | py |
fusion-dance | fusion-dance-main/gated_pixelcnn_prior.py | import os
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import pytorch_msssim
from tqdm import tqdm
import utils.data as data
import utils.graphics as graphics
import utils.loss as loss
from models import vqvae, gated_pixelcnn
seed = 42
np.random.seed(seed)
_ = torch.manual_se... | 9,555 | 31.175084 | 91 | py |
fusion-dance | fusion-dance-main/baseline_gated_pixelcnn.py | import os
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import pytorch_msssim
from tqdm import tqdm
import utils.data as data
import utils.graphics as graphics
import utils.loss as loss
from models import vqvae, gated_pixelcnn
seed = 42
np.random.seed(seed)
_ = torch.manual_se... | 8,036 | 30.517647 | 91 | py |
fusion-dance | fusion-dance-main/dc_gan.py | # https://pytorch.org/tutorials/beginner/dcgan_faces_tutorial.html
import os
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
import numpy as np
import matplotlib.pyp... | 9,571 | 33.555957 | 144 | py |
fusion-dance | fusion-dance-main/conv_autoencoder.py | import os
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
from IPython.display import HTML
from matplotlib import animation, colors
from PIL import Image
import pytorch_msssim
from torchvision import transforms
from tqdm import tqdm
import utils.data as data
import utils.graphics... | 9,435 | 30.986441 | 80 | py |
fusion-dance | fusion-dance-main/conditional_gated_pixelcnn_prior.py | import os
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
from tqdm import tqdm
import utils.data as data
import utils.graphics as graphics
import utils.loss as loss
from models import vqvae, gated_pixelcnn
seed = 42
np.random.seed(seed)
_ = torch.manual_seed(seed)
############... | 10,805 | 32.663551 | 108 | py |
fusion-dance | fusion-dance-main/conv_vae.py | import os
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import pytorch_msssim
from tqdm import tqdm
import utils.data as data
import utils.graphics as graphics
import utils.loss as loss
from models import vae
seed = 42
np.random.seed(seed)
_ = torch.manual_seed(seed)
########... | 11,843 | 32.269663 | 80 | py |
fusion-dance | fusion-dance-main/transformer_prior.py | """
Trains a transformer prior to generate images using a VQ-VAE's encodings.
"""
import os
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
from tqdm import tqdm
import utils.data as data
import utils.graphics as graphics
from models import vqvae
from transformers import GPT2LMHe... | 12,816 | 33.087766 | 87 | py |
fusion-dance | fusion-dance-main/models/gated_pixelcnn.py | # Adapted from: https://uvadlc-notebooks.readthedocs.io/en/latest/tutorial_notebooks/tutorial12/Autoregressive_Image_Modeling.html
from tqdm import tqdm
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
class MaskedConvolution(nn.Module):
def __init__(self, c_in, c_out, mask,... | 24,133 | 40.682211 | 130 | py |
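`MaskedConvolution` in the gated_pixelcnn cell applies a fixed binary mask to the kernel so each pixel's prediction depends only on already-generated pixels. A minimal standalone version of the classic PixelCNN mask (type 'A' blocks the center pixel, type 'B' allows it):

```python
import torch
import torch.nn as nn

class MaskedConv2d(nn.Conv2d):
    def __init__(self, mask_type, *args, **kwargs):
        super().__init__(*args, **kwargs)
        assert mask_type in ("A", "B")
        kh, kw = self.kernel_size
        mask = torch.zeros(kh, kw)
        mask[: kh // 2, :] = 1                 # rows strictly above the center
        mask[kh // 2, : kw // 2] = 1           # same row, strictly left of center
        if mask_type == "B":
            mask[kh // 2, kw // 2] = 1         # 'B' layers may see the center pixel
        self.register_buffer("mask", mask[None, None])

    def forward(self, x):
        self.weight.data *= self.mask          # zero out weights on 'future' pixels
        return super().forward(x)

out = MaskedConv2d("A", 3, 64, kernel_size=5, padding=2)(torch.randn(1, 3, 28, 28))
```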
fusion-dance | fusion-dance-main/models/vqvae.py | import torch
import torch.nn as nn
import numpy as np
# https://nbviewer.jupyter.org/github/zalandoresearch/pytorch-vq-vae/blob/master/vq-vae.ipynb
class VectorQuantizerEMA(nn.Module):
def __init__(
self, num_embeddings, embedding_dim, commitment_cost, decay=0.0, epsilon=1e-5
):
super(VectorQua... | 9,325 | 35.147287 | 93 | py |
fusion-dance | fusion-dance-main/models/cnn_discriminator.py | import torch
import torch.nn as nn
import numpy as np
class CNNDiscriminator(nn.Module):
def __init__(self, input_channels, input_dim, num_filters, num_layers):
super(CNNDiscriminator, self).__init__()
channel_sizes = self.calculate_channel_sizes(
input_channels, num_filters, num_layer... | 1,489 | 32.863636 | 88 | py |
fusion-dance | fusion-dance-main/models/utils.py | import torch
import torch.nn as nn
import numpy as np
def get_freezable_layers(model):
# Freeze Conv Layers
freezable_layers = []
for layer in model.encoder:
if "Linear" not in str(layer):
freezable_layers.append(layer)
for layer in model.decoder:
if "Linear" not in str(lay... | 1,064 | 27.026316 | 49 | py |
fusion-dance | fusion-dance-main/models/classifier.py | import torch
import torch.nn as nn
import numpy as np
class CNNMultiClassClassifier(nn.Module):
def __init__(
self,
num_layers,
max_filters,
num_output_classes,
input_dimension=64,
input_channels=1,
):
super(CNNMultiClassClassifier, self).__init__()
... | 2,534 | 30.296296 | 87 | py |
fusion-dance | fusion-dance-main/models/vae_gan.py | import torch
import torch.nn as nn
import numpy as np
class VAEGANEncoder(nn.Module):
def __init__(
self,
image_channels=3,
max_filters=512,
num_layers=4,
kernel_size=2,
stride=2,
padding=0,
latent_dim=128,
input_image_dimensions=96,
... | 9,089 | 33.694656 | 86 | py |
fusion-dance | fusion-dance-main/models/vae.py | import torch
import torch.nn as nn
import numpy as np
# Ref: https://github.com/sksq96/pytorch-vae/blob/master/vae-cnn.ipynb
class ConvolutionalVAE(nn.Module):
def __init__(
self,
image_channels=3,
max_filters=512,
num_layers=4,
kernel_size=2,
stride=2,
paddi... | 17,557 | 35.655532 | 92 | py |
fusion-dance | fusion-dance-main/models/gan.py | from tqdm import tqdm
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
class DCGANGenerator(nn.Module):
def __init__(self, latent_dim, num_filters, num_output_channels):
super(DCGANGenerator, self).__init__()
self.main = nn.Sequential(
# input is l... | 4,277 | 30.688889 | 69 | py |
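The `DCGANGenerator` cell opens an `nn.Sequential` starting from the latent vector; a compact sketch of the standard DCGAN up-convolution stack it presumably continues into (filter counts are the PyTorch tutorial defaults, assumed here):

```python
import torch
import torch.nn as nn

def dcgan_generator(latent_dim=100, ngf=64, out_channels=3):
    """Latent (N, latent_dim, 1, 1) -> image (N, out_channels, 64, 64) in [-1, 1]."""
    return nn.Sequential(
        nn.ConvTranspose2d(latent_dim, ngf * 8, 4, 1, 0, bias=False), nn.BatchNorm2d(ngf * 8), nn.ReLU(True),  # 4x4
        nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False), nn.BatchNorm2d(ngf * 4), nn.ReLU(True),     # 8x8
        nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False), nn.BatchNorm2d(ngf * 2), nn.ReLU(True),     # 16x16
        nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False), nn.BatchNorm2d(ngf), nn.ReLU(True),             # 32x32
        nn.ConvTranspose2d(ngf, out_channels, 4, 2, 1, bias=False), nn.Tanh(),                                 # 64x64
    )

img = dcgan_generator()(torch.randn(2, 100, 1, 1))  # (2, 3, 64, 64)
```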
fusion-dance | fusion-dance-main/models/autoencoder.py | import torch
import torch.nn as nn
import numpy as np
class ConvolutionalAE(nn.Module):
def __init__(
self,
image_channels=3,
max_filters=512,
num_layers=4,
kernel_size=2,
stride=2,
padding=0,
latent_dim=128,
input_image_dimensions=96,
... | 10,834 | 35.60473 | 86 | py |
fusion-dance | fusion-dance-main/models/cnn_rnn.py | import torch
import torch.nn as nn
import numpy as np
class CNN_RNN(nn.Module):
def __init__(
self,
num_classes,
input_image_size=64,
input_channels=3,
cnn_output_channels=512,
cnn_blocks=4,
rnn_hidden_size=512,
rnn_bidirectional=False,
rnn_t... | 8,039 | 34.10917 | 88 | py |
fusion-dance | fusion-dance-main/models/cnn_enhancer.py | import torch
import torch.nn as nn
import numpy as np
class ImageEnhancerCNN(nn.Module):
def __init__(self, input_channels, num_filters, num_layers, use_4by4=False):
super(ImageEnhancerCNN, self).__init__()
self.use_4by4 = use_4by4
if num_layers < 2:
raise ValueError("Model sho... | 2,241 | 31.970588 | 86 | py |
fusion-dance | fusion-dance-main/models/cnn_prior.py | import torch
import torch.nn as nn
import numpy as np
class CNNPrior(nn.Module):
def __init__(self, input_channels, output_channels, input_dim, output_dim):
super(CNNPrior, self).__init__()
num_layers = self.get_number_of_layers(input_dim, output_dim)
channel_sizes = self.calculate_channel... | 6,981 | 31.175115 | 86 | py |
fusion-dance | fusion-dance-main/scripts/conditional_pixelcnn_generate.py | """
Generates an arbitrary number of outputs from the given conditional Pixel CNN
"""
import os
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
from tqdm import tqdm
import sys
sys.path.append("./")
import utils.data as data
from models import vqvae, gated_pixelcnn
seed = 42
n... | 4,174 | 34.683761 | 135 | py |
fusion-dance | fusion-dance-main/scripts/compare_model_outputs.py | """
Compares generated sprites of the specified models
Does NOT load models or generate sprites.
Only exception is the base model.
"""
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import os
import sys
import torch
sys.path.append("./")
from models import vqvae
from utils import data, graph... | 5,245 | 30.22619 | 79 | py |
fusion-dance | fusion-dance-main/scripts/vae_generate.py | """
Generate images using a VAE.
"""
import os
import sys
import numpy as np
import pandas as pd
import torch
import matplotlib.pyplot as plt
from tqdm import tqdm
sys.path.append("./")
import utils.data as data
from models import vae
seed = 42
np.random.seed(seed)
_ = torch.manual_seed(seed)
# VAE Config
experimen... | 1,875 | 24.013333 | 78 | py |
fusion-dance | fusion-dance-main/scripts/compute_vqvae_embeddings.py | import os
import sys
import numpy as np
import pandas as pd
import torch
from tqdm import tqdm
sys.path.append("./")
import utils.data as data
from models import vqvae
seed = 42
np.random.seed(seed)
_ = torch.manual_seed(seed)
################################################################################
########... | 5,345 | 32.204969 | 90 | py |
fusion-dance | fusion-dance-main/scripts/compute_vae_embeddings.py | import os
import sys
import numpy as np
import pandas as pd
import torch
from tqdm import tqdm
sys.path.append("./")
import utils.data as data
from models import vae
seed = 42
np.random.seed(seed)
_ = torch.manual_seed(seed)
################################################################################
##########... | 4,420 | 30.578571 | 94 | py |
fusion-dance | fusion-dance-main/scripts/gan_generate.py | """
Generate images using a GAN.
"""
import os
import sys
import numpy as np
import torch
import matplotlib.pyplot as plt
from tqdm import tqdm
sys.path.append("./")
from models import gan
seed = 42
np.random.seed(seed)
_ = torch.manual_seed(seed)
# GAN Config
experiment_name = "pokemon_gan_v1"
epoch_to_load = 24
l... | 1,750 | 25.938462 | 78 | py |
fusion-dance | fusion-dance-main/scripts/pixelcnn_generate.py | """
Generates an arbitrary number of outputs from the given conditional Pixel CNN
"""
import os
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
from tqdm import tqdm
import sys
sys.path.append("./")
import utils.data as data
from models import vqvae, gated_pixelcnn
seed = 42
n... | 3,300 | 30.141509 | 87 | py |
fusion-dance | fusion-dance-main/scripts/compute_metrics_from_images.py | """
Given two directories, source and target
Compute the MSE & SSIM scores between the images in them.
"""
import os
import sys
import torch
from PIL import Image
from pytorch_msssim import ssim
from torchvision import transforms
from tqdm import tqdm
def load_images_from_dir(dir, transform):
images = []
for... | 1,144 | 22.367347 | 79 | py |
fusion-dance | fusion-dance-main/scripts/transformer_generate.py | """
Generate images using a Transformer + VQVAE.
"""
import os
import matplotlib.pyplot as plt
import numpy as np
import torch
from tqdm import tqdm
import sys
sys.path.append("./")
import utils.data as data
from models import vqvae
from transformers import GPT2LMHeadModel, GPT2Config
seed = 42
np.random.seed(seed)
_... | 4,927 | 30.793548 | 83 | py |
fusion-dance | fusion-dance-main/scripts/compute_fid.py | # No real script here.
# Including this in case I forget.
# To compute FID: python -m pytorch_fid path/to/dataset1 path/to/dataset2 | 131 | 43 | 73 | py |
fusion-dance | fusion-dance-main/scripts/.old/compare_model_fusions_prior.py | """
Disclaimer: This script contains older code and may not work as is.
Samples N images from the given data
Load the original VQ-VAE and the prior model.
Only supports CNNPrior
Generates fusions using the models and saves.
"""
import os
import sys
import matplotlib.pyplot as plt
import numpy as np
import torch
from P... | 8,830 | 33.631373 | 87 | py |
fusion-dance | fusion-dance-main/scripts/.old/compute_metrics_on_test_set.py | """
Disclaimer: This script contains older code and may not work as is.
Only for CNNPrior
Given the test set directory
Load model, compute outputs and calculate MSE & SSIM
"""
import os
import sys
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import pytorch_msssim
from tqdm impo... | 7,287 | 34.207729 | 88 | py |
fusion-dance | fusion-dance-main/scripts/.old/compare_model_fusions.py | """
Disclaimer: This script contains older code and may not work as is.
Samples N fusions from the given data. That is, it gets the corresponding base
and fusee images along with the fusion image. This is done N times.
Then for each model specified, it loads the model.
Generates fusions using that model and the base + ... | 9,260 | 31.957295 | 79 | py |
fusion-dance | fusion-dance-main/experiments/fusion_enhancer.py | import os
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import pytorch_msssim
from tqdm import tqdm
import utils.data as data
import utils.graphics as graphics
import utils.loss as loss
from models import cnn_enhancer
seed = 42
np.random.seed(seed)
_ = torch.manual_seed(seed)
... | 8,132 | 30.894118 | 93 | py |
fusion-dance | fusion-dance-main/experiments/adversarial_finetuning.py | import os
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import pytorch_msssim
from tqdm import tqdm
import utils.data as data
import utils.graphics as graphics
import utils.loss as loss
from models import vqvae, cnn_discriminator, cnn_prior
seed = 42
np.random.seed(seed)
_ = t... | 14,762 | 32.860092 | 88 | py |
fusion-dance | fusion-dance-main/experiments/inpainting_cnn_prior.py | import os
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import pytorch_msssim
from tqdm import tqdm
import utils.data as data
import utils.graphics as graphics
import utils.loss as loss
from models import vqvae, vae, cnn_prior
seed = 42
np.random.seed(seed)
_ = torch.manual_se... | 11,055 | 31.807122 | 86 | py |
fusion-dance | fusion-dance-main/experiments/fusion_cnn_prior.py | import os
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import pytorch_msssim
from tqdm import tqdm
import utils.data as data
import utils.graphics as graphics
import utils.loss as loss
from models import vqvae, cnn_prior
seed = 42
np.random.seed(seed)
_ = torch.manual_seed(se... | 15,530 | 31.835095 | 88 | py |
fusion-dance | fusion-dance-main/experiments/fusion_conv_autoencoder_old.py | import os
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
from IPython.display import HTML
from matplotlib import animation, colors
from PIL import Image
import pytorch_msssim
from torchvision import transforms
from tqdm import tqdm
import utils.data as data
import utils.graphics... | 23,260 | 35.063566 | 95 | py |
fusion-dance | fusion-dance-main/experiments/dual_input_vae.py | import os
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import pytorch_msssim
from tqdm import tqdm
import utils.data as data
import utils.graphics as graphics
import utils.loss as loss
import models
seed = 42
np.random.seed(seed)
_ = torch.manual_seed(seed)
#################... | 15,206 | 34.447552 | 87 | py |
fusion-dance | fusion-dance-main/experiments/vae_prior.py | import os
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
from tqdm import tqdm
import utils.data as data
import utils.graphics as graphics
import utils.loss as loss
from models import vae, vqvae
seed = 42
np.random.seed(seed)
_ = torch.manual_seed(seed)
#######################... | 15,519 | 34.925926 | 91 | py |
fusion-dance | fusion-dance-main/experiments/vae_gan.py | # TODO: This has some issues. Needs fixing.
# Work on the MNIST version first.
# Once you get that working, come back here!
import os
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import pytorch_msssim
from tqdm import tqdm
import utils.data ... | 14,553 | 34.325243 | 88 | py |
fusion-dance | fusion-dance-main/experiments/fusion_cnn_rnn.py | import os
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import pytorch_msssim
from tqdm import tqdm
from PIL import Image
import utils.data as data
import utils.graphics as graphics
import utils.loss as loss
from models import cnn_rnn, vqvae
seed = 42
np.random.seed(seed)
_ = ... | 13,396 | 32.916456 | 88 | py |
fusion-dance | fusion-dance-main/experiments/finetune_fusion_conv_autoencoder_old.py | import os
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
from IPython.display import HTML
from matplotlib import animation, colors
from PIL import Image
import pytorch_msssim
from torchvision import transforms
from tqdm import tqdm
import utils.data as data
import utils.graphics... | 23,324 | 34.99537 | 95 | py |
fusion-dance | fusion-dance-main/experiments/fusion_cnn_multirnn.py | import os
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import pytorch_msssim
from tqdm import tqdm
from PIL import Image
import utils.data as data
import utils.graphics as graphics
from models import cnn_rnn, vqvae
seed = 42
np.random.seed(seed)
_ = torch.manual_seed(seed)
#... | 14,983 | 33.846512 | 87 | py |
fusion-dance | fusion-dance-main/experiments/fusion_discriminator.py | import os
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import pytorch_msssim
from tqdm import tqdm
from sklearn.metrics import classification_report
import utils.data as data
import utils.graphics as graphics
import utils.loss as loss
from models import cnn_discriminator
seed... | 7,269 | 30.336207 | 87 | py |
fusion-dance | fusion-dance-main/experiments/vae_gan_mnist.py | import os
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import pytorch_msssim
from tqdm import tqdm
import utils.data_mnist as data
import utils.graphics as graphics
import utils.loss as loss
from models import vae_gan
seed = 42
np.random.seed(s... | 16,389 | 35.341463 | 88 | py |
fusion-dance | fusion-dance-main/experiments/inpainting_gated_pixelcnn_prior.py | import os
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import pytorch_msssim
from tqdm import tqdm
import utils.data as data
import utils.graphics as graphics
import utils.loss as loss
from models import vqvae, gated_pixelcnn
seed = 42
np.random.seed(seed)
_ = torch.manual_se... | 10,378 | 31.638365 | 86 | py |
fusion-dance | fusion-dance-main/experiments/transfer_conv_vae.py | # NOTE: The contents of this file are in all likelihood outdated
# Abandoned this approach as it didn't work out too well
import os
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
from pytorch_msssim import ssim
from tqdm import tqdm
import utils.data as data
import utils.graphi... | 12,547 | 33.952646 | 84 | py |
fusion-dance | fusion-dance-main/experiments/feedforward_classifier_prior.py | import os
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
from tqdm import tqdm
import utils.data as data
import utils.graphics as graphics
import utils.loss as loss
from models import classifier, vqvae, vae
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics impo... | 11,497 | 32.619883 | 88 | py |
fusion-dance | fusion-dance-main/experiments/dual_input_autoencoder.py | import os
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import pytorch_msssim
from tqdm import tqdm
import utils.data as data
import utils.graphics as graphics
import utils.loss as loss
import models
seed = 42
np.random.seed(seed)
_ = torch.manual_seed(seed)
#################... | 14,096 | 34.779188 | 87 | py |
fusion-dance | fusion-dance-main/experiments/cnn_classifier_prior.py | import os
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
from tqdm import tqdm
import utils.data as data
import utils.graphics as graphics
import utils.loss as loss
from models import classifier, vqvae
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import cl... | 10,206 | 31.610224 | 85 | py |
fusion-dance | fusion-dance-main/experiments/baseline_classifier.py | import os
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
from tqdm import tqdm
import utils.data as data
import utils.graphics as graphics
import utils.loss as loss
from models import classifier
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import classific... | 8,541 | 30.992509 | 85 | py |
fusion-dance | fusion-dance-main/experiments/distilled_vae.py | import os
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import pytorch_msssim
from tqdm import tqdm
import utils.data as data
import utils.graphics as graphics
import utils.loss as loss
import models
seed = 42
np.random.seed(seed)
_ = torch.manual_seed(seed)
#################... | 13,752 | 32.300242 | 85 | py |
fusion-dance | fusion-dance-main/experiments/fusion_vae_old.py | import os
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
from IPython.display import HTML
from matplotlib import animation, colors
from PIL import Image
import pytorch_msssim
from torchvision import transforms
from tqdm import tqdm
import utils.data as data
import utils.graphics... | 30,677 | 37.783818 | 95 | py |
fusion-dance | fusion-dance-main/experiments/markov/encodings/markov_generate.py | import sys
import os
import random
import pickle
from datetime import datetime
import numpy as np
from PIL import Image
import torch
sys.path.append("./")
import models
probabilities_file = sys.argv[1]
default_class = int(sys.argv[2]) # The background token class. 112 for 5.10
out_dir = probabilities_file.split("\\"... | 3,013 | 30.395833 | 87 | py |
fusion-dance | fusion-dance-main/utils/data_mnist.py | import torch
import torch.nn as nn
from torchvision import transforms
import numpy as np
from PIL import Image
import os
class MNISTDataset(torch.utils.data.Dataset):
def __init__(self, dataset=None, transform=None):
self.dataset = dataset
self.transform = transform
def __getitem__(self, ind... | 822 | 24.71875 | 74 | py |
fusion-dance | fusion-dance-main/utils/loss.py | import torch
import torch.nn as nn
import numpy as np
def bits_per_dimension_loss(x_pred, x):
nll = nn.functional.cross_entropy(x_pred, x, reduction="none")
bpd = nll.mean(dim=[1, 2, 3]) * np.log2(np.exp(1))
return bpd.mean()
def rmse_loss(reconstructed_x, x, use_sum=False, epsilon=1e-8):
"""
We... | 3,417 | 30.943925 | 99 | py |
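`bits_per_dimension_loss` in the `utils/loss.py` cell is shown in full: per-element cross-entropy in nats, averaged per sample and converted to bits via log2(e). A usage sketch with assumed shapes (256-way logits per subpixel of a 3×32×32 image):

```python
import torch
import torch.nn as nn
import numpy as np

def bits_per_dimension_loss(x_pred, x):
    nll = nn.functional.cross_entropy(x_pred, x, reduction="none")
    bpd = nll.mean(dim=[1, 2, 3]) * np.log2(np.exp(1))
    return bpd.mean()

logits = torch.randn(4, 256, 3, 32, 32)           # (B, classes, C, H, W)
targets = torch.randint(0, 256, (4, 3, 32, 32))   # (B, C, H, W)
print(bits_per_dimension_loss(logits, targets))   # ~8.7 bits for random logits
```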