repo (string, 1-99 chars) | file (string, 13-215 chars) | code (string, 12-59.2M chars) | file_length (int64, 12-59.2M) | avg_line_length (float64, 3.82-1.48M) | max_line_length (int64, 12-2.51M) | extension_type (string, 1 class) |
|---|---|---|---|---|---|---|
CamStyle | CamStyle-master/CycleGAN-for-CamStyle/util/util.py | from __future__ import print_function
import torch
import numpy as np
from PIL import Image
import os
# Converts a Tensor into an image array (numpy)
# |imtype|: the desired type of the converted numpy array
def tensor2im(input_image, imtype=np.uint8):
if isinstance(input_image, torch.Tensor):
image_tenso... | 1,613 | 25.459016 | 85 | py |
CamStyle | CamStyle-master/CycleGAN-for-CamStyle/data/base_dataset.py | import torch.utils.data as data
from PIL import Image
import torchvision.transforms as transforms
class BaseDataset(data.Dataset):
def __init__(self):
super(BaseDataset, self).__init__()
def name(self):
return 'BaseDataset'
def initialize(self, opt):
pass
def get_transform(opt)... | 1,601 | 31.693878 | 70 | py |
CamStyle | CamStyle-master/CycleGAN-for-CamStyle/data/image_folder.py | ###############################################################################
# Code from
# https://github.com/pytorch/vision/blob/master/torchvision/datasets/folder.py
# Modified the original code so that it loads images from the current
# directory as well as from its subdirectories
################################... | 1,946 | 27.217391 | 79 | py |
CamStyle | CamStyle-master/CycleGAN-for-CamStyle/data/aligned_dataset.py | import os.path
import random
import torchvision.transforms as transforms
import torch
from data.base_dataset import BaseDataset
from data.image_folder import make_dataset
from PIL import Image
class AlignedDataset(BaseDataset):
def initialize(self, opt):
self.opt = opt
self.root = opt.dataroot
... | 2,409 | 36.076923 | 96 | py |
CamStyle | CamStyle-master/CycleGAN-for-CamStyle/data/__init__.py | import torch.utils.data
from data.base_data_loader import BaseDataLoader
def CreateDataLoader(opt):
data_loader = CustomDatasetDataLoader()
print(data_loader.name())
data_loader.initialize(opt)
return data_loader
def CreateDataset(opt):
dataset = None
if opt.dataset_mode == 'aligned':
... | 1,740 | 29.017241 | 75 | py |
const_layout | const_layout-master/generate.py | import pickle
import argparse
from pathlib import Path
import torch
from torch_geometric.data import DataLoader
from torch_geometric.utils import to_dense_batch
from util import set_seed, convert_layout_to_image
from data import get_dataset
from model.layoutganpp import Generator
def main():
parser = argparse.A... | 2,951 | 32.931034 | 75 | py |
const_layout | const_layout-master/generate_const.py | import os
os.environ['OMP_NUM_THREADS'] = '1' # noqa
import pickle
import argparse
import tempfile
import subprocess
from tqdm import tqdm
from pathlib import Path
import torch
import torchvision.transforms as T
from torch_geometric.data import DataLoader
from torch_geometric.utils import to_dense_batch
from data i... | 6,963 | 33.82 | 75 | py |
const_layout | const_layout-master/util.py | import json
import random
import shutil
import numpy as np
from pathlib import Path
from datetime import datetime
from PIL import Image, ImageDraw
import torch
import torchvision.utils as vutils
import torchvision.transforms as T
def set_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_see... | 2,792 | 25.6 | 67 | py |
const_layout | const_layout-master/eval.py | import pickle
import argparse
import numpy as np
from pathlib import Path
from collections import defaultdict
import torch
from torch_geometric.data import Data, Batch, DataLoader
from torch_geometric.utils import to_dense_batch
from data import get_dataset
from metric import LayoutFID, compute_maximum_iou, \
com... | 4,989 | 33.178082 | 76 | py |
const_layout | const_layout-master/train.py | import os
import argparse
os.environ['OMP_NUM_THREADS'] = '1' # noqa
import torch
import torch.optim as optim
import torch.nn.functional as F
import torchvision.transforms as T
from torch_geometric.data import DataLoader
from torch_geometric.utils import to_dense_batch
from torch.utils.tensorboard import SummaryWrite... | 10,248 | 39.035156 | 81 | py |
const_layout | const_layout-master/metric.py | import numpy as np
import multiprocessing as mp
from itertools import chain
from scipy.optimize import linear_sum_assignment
import torch
from torch_geometric.utils import to_dense_adj
from pytorch_fid.fid_score import calculate_frechet_distance
from model.layoutnet import LayoutNet
from util import convert_xywh_to_l... | 6,895 | 30.063063 | 80 | py |
const_layout | const_layout-master/clg/const.py | import torch
from functools import partial
from torch_geometric.utils import to_dense_adj, to_dense_batch
from util import convert_xywh_to_ltrb
from metric import compute_alignment, compute_overlap
from data.util import RelSize, RelLoc, REL_SIZE_ALPHA
def beautify_alignment(bbox_flatten, data, threshold=0.004, **kwa... | 6,679 | 28.298246 | 81 | py |
const_layout | const_layout-master/clg/auglag.py | import torch
from torch_geometric.utils import to_dense_batch
class AugLagMethod():
def __init__(self, netG, netD, inner_optimizer, constraints,
alpha=3., l0=0., m0=1., iteration=5, tolerance=1e-8,
clamp_f=True, raise_error_if_failed=False):
self.netG = netG
self.... | 4,802 | 30.807947 | 78 | py |
const_layout | const_layout-master/clg/optim.py | import cma
import torch
class AdamOptimizer():
def __init__(self, lr=0.01, iteration=200):
self.lr = lr
self.iteration = iteration
def generator(self, z, objective, **kwargs):
z = z.detach().requires_grad_(True)
optimizer = torch.optim.Adam([z], lr=self.lr)
for _ in ra... | 2,150 | 28.875 | 71 | py |
const_layout | const_layout-master/data/rico.py | import json
from pathlib import Path
import torch
from torch_geometric.data import Data
from data.base import BaseDataset
def append_child(element, elements):
if 'children' in element.keys():
for child in element['children']:
elements.append(child)
elements = append_child(child, ... | 3,370 | 27.811966 | 79 | py |
const_layout | const_layout-master/data/base.py | import torch
import seaborn as sns
from pathlib import Path
from torch_geometric.data import InMemoryDataset
class BaseDataset(InMemoryDataset):
labels = []
_label2index = None
_index2label = None
_colors = None
def __init__(self, name, split, transform):
assert split in ['train', 'val', ... | 1,794 | 28.916667 | 72 | py |
const_layout | const_layout-master/data/magazine.py | from pathlib import Path
import xml.etree.ElementTree as ET
import torch
from torch_geometric.data import Data
from data.base import BaseDataset
class Magazine(BaseDataset):
labels = [
'text',
'image',
'headline',
'text-over-image',
'headline-over-image',
]
def _... | 2,554 | 30.158537 | 79 | py |
const_layout | const_layout-master/data/publaynet.py | from pathlib import Path
from pycocotools.coco import COCO
import torch
from torch_geometric.data import Data
from data.base import BaseDataset
class PubLayNet(BaseDataset):
labels = [
'text',
'title',
'list',
'table',
'figure',
]
def __init__(self, split='train'... | 3,333 | 31.368932 | 73 | py |
const_layout | const_layout-master/data/util.py | import torch
import random
from enum import IntEnum
from itertools import product, combinations
from util import convert_xywh_to_ltrb
class RelSize(IntEnum):
UNKNOWN = 0
SMALLER = 1
EQUAL = 2
LARGER = 3
class RelLoc(IntEnum):
UNKNOWN = 4
LEFT = 5
TOP = 6
RIGHT = 7
BOTTOM = 8
... | 4,726 | 24.010582 | 70 | py |
const_layout | const_layout-master/model/layoutnet.py | import torch
import torch.nn as nn
from model.util import TransformerWithToken
class LayoutNet(nn.Module):
def __init__(self, num_label):
super().__init__()
d_model = 256
nhead = 4
num_layers = 4
max_bbox = 50
# encoder
self.emb_label = nn.Embedding(num_l... | 2,176 | 32.492308 | 87 | py |
const_layout | const_layout-master/model/util.py | import torch
import torch.nn as nn
class TransformerWithToken(nn.Module):
def __init__(self, d_model, nhead, dim_feedforward, num_layers):
super().__init__()
self.token = nn.Parameter(torch.randn(1, 1, d_model))
token_mask = torch.zeros(1, 1, dtype=torch.bool)
self.register_buffer... | 1,058 | 28.416667 | 75 | py |
const_layout | const_layout-master/model/layoutganpp.py | import torch
import torch.nn as nn
from model.util import TransformerWithToken
class Generator(nn.Module):
def __init__(self, dim_latent, num_label,
d_model=512, nhead=8, num_layers=4):
super().__init__()
self.fc_z = nn.Linear(dim_latent, d_model // 2)
self.emb_label = n... | 3,210 | 33.159574 | 87 | py |
BDG-Net | BDG-Net-master/MyTest.py | import torch
import torch.nn.functional as F
import numpy as np
import os, argparse
import cv2
from PIL import Image
import torchvision.transforms as transforms
from metric.dice import mean_dice
gpu_list = [1]
gpu_list_str = ','.join(map(str, gpu_list))
os.environ.setdefault("CUDA_VISIBLE_DEVICES", gpu_list_str)
cl... | 3,400 | 34.427083 | 114 | py |
BDG-Net | BDG-Net-master/BDM_Net.py | import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
from torch.nn import functional as F
import pytorch_lightning as pl
from metric.dice import mean_dice
from segmentation_models_pytorch.encoders import get_encoder
from segmentation_models_pytorch.base.heads import SegmentationHead
fro... | 14,244 | 30.585366 | 93 | py |
BDG-Net | BDG-Net-master/train.py | import random
import os
import numpy as np
import torch
import argparse
import albumentations as A
import matplotlib.pyplot as plt
from torch.utils.data import DataLoader
from pytorch_lightning.trainer import Trainer
from pytorch_lightning import loggers as pl_loggers
from pytorch_lightning.callbacks import ModelCheckp... | 3,660 | 32.281818 | 132 | py |
BDG-Net | BDG-Net-master/metric/dice.py | from pytorch_lightning.metrics import Metric
import numpy as np
import torch
def dice(preds: torch.Tensor, target: torch.Tensor, th=0.5, if_sigmoid=True):
if preds.shape != target.shape:
preds = preds.squeeze(1)
assert preds.shape == target.shape
if not isinstance(preds, torch.FloatTensor):
... | 1,313 | 23.792453 | 77 | py |
BDG-Net | BDG-Net-master/utils/dataset.py | import os
import os.path
import cv2
import numpy as np
import torch
from torchvision import transforms as T
from torch.utils.data import Dataset
from scipy.ndimage.morphology import distance_transform_edt
from matplotlib import pyplot as plt
import matplotlib.ticker as ticker
from utils.util import overlay
def make_d... | 4,646 | 36.475806 | 129 | py |
BDG-Net | BDG-Net-master/utils/scheduler.py | import math
from torch.optim.lr_scheduler import LambdaLR
class ConstantLRSchedule(LambdaLR):
""" Constant learning rate schedule.
"""
def __init__(self, optimizer, last_epoch=-1):
super(ConstantLRSchedule, self).__init__(optimizer, lambda _: 1.0, last_epoch=last_epoch)
class WarmupConstantSched... | 3,146 | 42.109589 | 117 | py |
BDG-Net | BDG-Net-master/utils/util.py | import numpy as np
from PIL import Image
from thop import profile
from thop import clever_format
from torch import nn
import torch.nn.init as initer
import cv2, os
def initialize_weights(*models):
"""
Initialize Model Weights
"""
for model in models:
for module in model.modules():
... | 5,871 | 33.339181 | 95 | py |
lapjv | lapjv-master/doc/conf.py | # -*- coding: utf-8 -*-
#
# lapjv documentation build configuration file, created by
# sphinx-quickstart on Mon Jun 5 16:52:34 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All... | 5,024 | 30.018519 | 81 | py |
AAAI2023-PVD | AAAI2023-PVD-main/main_just_train_tea.py | import torch
import os
import argparse
from just_train_tea.network import NeRFNetwork
from functools import partial
from just_train_tea.provider import NeRFDataset
from just_train_tea.utils import *
from time import time
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("pat... | 12,406 | 33.949296 | 138 | py |
AAAI2023-PVD | AAAI2023-PVD-main/main_distill_mutual.py | import torch
import os
import argparse
from distill_mutual.network import NeRFNetwork
from functools import partial
from time import time
from distill_mutual.provider import NeRFDataset
from distill_mutual.utils import *
from IPython import embed
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
... | 14,169 | 33.645477 | 138 | py |
AAAI2023-PVD | AAAI2023-PVD-main/tools/activation.py | import torch
from torch.autograd import Function
from torch.cuda.amp import custom_bwd, custom_fwd
class _trunc_exp(Function):
@staticmethod
@custom_fwd(cast_inputs=torch.float32) # cast to float32
def forward(ctx, x):
ctx.save_for_backward(x)
return torch.exp(x)
@staticmethod
@c... | 516 | 22.5 | 61 | py |
AAAI2023-PVD | AAAI2023-PVD-main/tools/encoding.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class FreqEncoder(nn.Module):
def __init__(
self,
input_dim,
max_freq_log2,
N_freqs,
log_sampling=True,
include_input=True,
periodic_fns=(torch.sin, torch.cos),
):
super().__init... | 3,145 | 24.370968 | 87 | py |
AAAI2023-PVD | AAAI2023-PVD-main/gridencoder/setup.py | import os
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
_src_path = os.path.dirname(os.path.abspath(__file__))
nvcc_flags = [
"-O3",
"-std=c++14",
"-U__CUDA_NO_HALF_OPERATORS__",
"-U__CUDA_NO_HALF_CONVERSIONS__",
"-U__CUDA_NO_HALF2_OPERATORS__",
]... | 1,837 | 26.848485 | 117 | py |
AAAI2023-PVD | AAAI2023-PVD-main/gridencoder/backend.py | import os
from torch.utils.cpp_extension import load
_src_path = os.path.dirname(os.path.abspath(__file__))
nvcc_flags = [
"-O3",
"-std=c++14",
"-U__CUDA_NO_HALF_OPERATORS__",
"-U__CUDA_NO_HALF_CONVERSIONS__",
"-U__CUDA_NO_HALF2_OPERATORS__",
]
if os.name == "posix":
c_flags = ["-O3", "-std=c... | 1,462 | 24.666667 | 117 | py |
AAAI2023-PVD | AAAI2023-PVD-main/gridencoder/grid.py | import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.cuda.amp import custom_bwd, custom_fwd
try:
import _gridencoder as _backend
except ImportError:
from .backend import _backend
_gridtype_to_id = {
"h... | 7,467 | 31.051502 | 374 | py |
AAAI2023-PVD | AAAI2023-PVD-main/raymarching/raymarching.py | import numpy as np
import time
import torch
import torch.nn as nn
from torch.autograd import Function
from torch.cuda.amp import custom_bwd, custom_fwd
try:
import _raymarching as _backend
except ImportError:
from .backend import _backend
# ----------------------------------------
# utils
# ----------------... | 16,276 | 29.827652 | 175 | py |
AAAI2023-PVD | AAAI2023-PVD-main/raymarching/setup.py | import os
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
_src_path = os.path.dirname(os.path.abspath(__file__))
nvcc_flags = [
"-O3",
"-std=c++14",
"-U__CUDA_NO_HALF_OPERATORS__",
"-U__CUDA_NO_HALF_CONVERSIONS__",
"-U__CUDA_NO_HALF2_OPERATORS__",
]... | 2,273 | 28.153846 | 123 | py |
AAAI2023-PVD | AAAI2023-PVD-main/raymarching/backend.py | import os
from torch.utils.cpp_extension import load
_src_path = os.path.dirname(os.path.abspath(__file__))
nvcc_flags = [
"-O3",
"-std=c++14",
"-U__CUDA_NO_HALF_OPERATORS__",
"-U__CUDA_NO_HALF_CONVERSIONS__",
"-U__CUDA_NO_HALF2_OPERATORS__",
]
if os.name == "posix":
c_flags = ["-O3", "-std=c... | 1,461 | 24.649123 | 117 | py |
AAAI2023-PVD | AAAI2023-PVD-main/just_train_tea/renderer.py | import math
import trimesh
import numpy as np
from time import time
import torch
import torch.nn as nn
import torch.nn.functional as F
import raymarching
from .utils import custom_meshgrid
def sample_pdf(bins, weights, n_samples, det=False):
# This implementation is from NeRF
# bins: [B, T], old_z_vals
... | 30,911 | 37.304833 | 262 | py |
AAAI2023-PVD | AAAI2023-PVD-main/just_train_tea/network.py | import torch
from time import time
import torch.nn as nn
import torch.nn.functional as F
from tools.encoding import get_encoder
from tools.activation import trunc_exp
from .renderer import NeRFRenderer
import raymarching
class NeRFNetwork(NeRFRenderer):
def __init__(
self,
encoding="hashgrid",
... | 23,398 | 34.133634 | 216 | py |
AAAI2023-PVD | AAAI2023-PVD-main/just_train_tea/utils.py | import os
import lpips
import glob
import tqdm
import math
import random
import warnings
import tensorboardX
import numpy as np
import pandas as pd
import time
from datetime import datetime
import cv2
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.func... | 45,185 | 34.890389 | 178 | py |
AAAI2023-PVD | AAAI2023-PVD-main/just_train_tea/provider.py | import os
import cv2
import glob
import json
import tqdm
import numpy as np
from scipy.spatial.transform import Slerp, Rotation
import trimesh
import torch
from torch.utils.data import DataLoader
from .utils import get_rays, srgb_to_linear
# ref: https://github.com/NVlabs/instant-ngp/blob/b76004c8cf478880227401ae7... | 11,693 | 34.761468 | 143 | py |
AAAI2023-PVD | AAAI2023-PVD-main/distill_mutual/renderer.py | import math
import trimesh
import numpy as np
from time import time
import torch
import torch.nn as nn
import torch.nn.functional as F
import raymarching
from .utils import custom_meshgrid
from IPython import embed
def sample_pdf(bins, weights, n_samples, det=False):
# This implementation is from NeRF
# bin... | 31,012 | 37.052761 | 262 | py |
AAAI2023-PVD | AAAI2023-PVD-main/distill_mutual/network.py | import torch
from time import time
import torch.nn as nn
import torch.nn.functional as F
from tools.encoding import get_encoder
from tools.activation import trunc_exp
from .renderer import NeRFRenderer
import raymarching
class NeRFNetwork(NeRFRenderer):
def __init__(
self,
encoding="hashgrid",
... | 24,261 | 34.47076 | 216 | py |
AAAI2023-PVD | AAAI2023-PVD-main/distill_mutual/utils.py | import os
import copy
import lpips
import glob
import tqdm
import math
import random
import warnings
import tensorboardX
import numpy as np
import pandas as pd
import imageio
import time
from datetime import datetime
import cv2
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.optim a... | 64,507 | 36.395942 | 262 | py |
AAAI2023-PVD | AAAI2023-PVD-main/distill_mutual/provider.py | import os
import cv2
import glob
import json
import tqdm
import numpy as np
from scipy.spatial.transform import Slerp, Rotation
import trimesh
import torch
from torch.utils.data import DataLoader
from .utils import get_rays, srgb_to_linear
# ref: https://github.com/NVlabs/instant-ngp/blob/b76004c8cf478880227401ae7... | 11,693 | 34.761468 | 143 | py |
AAAI2023-PVD | AAAI2023-PVD-main/shencoder/sphere_harmonics.py | import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.cuda.amp import custom_bwd, custom_fwd
try:
import _shencoder as _backend
except ImportError:
from .backend import _backend
class _sh_encoder(Function)... | 2,909 | 29.3125 | 86 | py |
AAAI2023-PVD | AAAI2023-PVD-main/shencoder/setup.py | import os
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
_src_path = os.path.dirname(os.path.abspath(__file__))
nvcc_flags = [
"-O3",
"-std=c++14",
"-U__CUDA_NO_HALF_OPERATORS__",
"-U__CUDA_NO_HALF_CONVERSIONS__",
"-U__CUDA_NO_HALF2_OPERATORS__",
]... | 1,831 | 26.757576 | 117 | py |
AAAI2023-PVD | AAAI2023-PVD-main/shencoder/backend.py | import os
from torch.utils.cpp_extension import load
_src_path = os.path.dirname(os.path.abspath(__file__))
nvcc_flags = [
"-O3",
"-std=c++14",
"-U__CUDA_NO_HALF_OPERATORS__",
"-U__CUDA_NO_HALF_CONVERSIONS__",
"-U__CUDA_NO_HALF2_OPERATORS__",
]
if os.name == "posix":
c_flags = ["-O3", "-std=c... | 1,458 | 24.596491 | 117 | py |
factorizer | factorizer-master/setup.py | from setuptools import setup, find_packages
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name="factorizer",
version="0.0.2",
author="Pooya Ashtari",
author_email="pooya.ash@gmail.com",
description="Factorizer - PyTorch",
license="Apache 2.0",
long_description=... | 1,326 | 26.081633 | 71 | py |
factorizer | factorizer-master/factorizer/unet.py | import torch
from torch import nn
from .utils.helpers import as_tuple, prod, wrap_class
from .layers.conv import DoubleConv
class Same(object):
def __init__(self, block):
super().__init__()
self.block = block
def __getitem__(self, *args, **kwargs):
return self.block
class UNetStage... | 7,241 | 26.225564 | 87 | py |
factorizer | factorizer-master/factorizer/factorizer.py | from torch import nn
from .utils.helpers import wrap_class
from .layers import LayerNorm, Linear, PositionalEmbedding
from .factorization import Matricize, NMF
from .unet import UNet
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x, **kwa... | 5,557 | 26.651741 | 87 | py |
factorizer | factorizer-master/factorizer/tensor_network/base.py | """Implementation of TensorNetwork structure."""
from __future__ import annotations
from typing import (
Optional,
Any,
Hashable,
Callable,
Text,
Tuple,
List,
Sequence,
Iterable,
Set,
Dict,
TypedDict,
)
from copy import copy
import pickle
from itertools import chain
from... | 17,146 | 30.404762 | 89 | py |
factorizer | factorizer-master/factorizer/layers/pos_embed.py | import math
import torch
from torch import nn
class SinusoidalPositionalEmbedding(nn.Module):
"""Sinusoidal positional embedding."""
def __init__(self, channels, spatial_size, **kwargs):
super().__init__()
spatial_dims = len(spatial_size)
freqs = torch.exp(torch.arange(0, channels, 2... | 3,145 | 30.148515 | 89 | py |
factorizer | factorizer-master/factorizer/layers/norm.py | import torch
from torch import nn
class LayerNorm(nn.Module):
""""Layer norm for channels-first inputs."""
def __init__(self, dim, **kwargs):
super().__init__()
self.norm = nn.LayerNorm(dim, **kwargs)
def forward(self, x):
# x: B × C × S1 × S2 × ... × Sp
out = torch.einsu... | 449 | 24 | 53 | py |
factorizer | factorizer-master/factorizer/layers/mlp.py | from torch import nn
from torch.nn.modules.utils import _pair
from .linear import Linear
class MLP(nn.Module):
def __init__(
self,
in_channels,
out_channels=None,
hidden_channels=None,
ratio=2,
dropout=0.0,
**kwargs,
):
super().__init__()
... | 846 | 23.911765 | 84 | py |
factorizer | factorizer-master/factorizer/layers/misc.py | from torch import nn
class DepthWiseP2P(nn.Module):
"Depth-wise patch-to-patch transform."
def __init__(self, size):
super().__init__()
# patches already flattened in the matricization step
num_pixels = size[-1] # last dim is #pixels in a patch
self.linear = nn.Linear(num_pix... | 445 | 26.875 | 63 | py |
factorizer | factorizer-master/factorizer/layers/linear.py | from torch import nn
class Linear(nn.Module):
""" "Linear layer for channels-first inputs."""
def __init__(
self,
in_channels,
out_channels,
bias=True,
device=None,
dtype=None,
):
super().__init__()
self.flatten = nn.Flatten(start_dim=2)
... | 696 | 20.78125 | 51 | py |
factorizer | factorizer-master/factorizer/layers/conv.py | import torch
from torch import nn
from torchvision.ops import StochasticDepth
from .linear import Linear
from ..utils.helpers import as_tuple, prod, wrap_class
class DoubleConv(nn.Module):
"""(Conv -- Drop -- Norm -- Act) ** 2."""
def __init__(
self,
in_channels,
out_channels,
... | 9,369 | 26.080925 | 86 | py |
factorizer | factorizer-master/factorizer/layers/attention.py | import math
import torch
from torch import nn
from performer_pytorch import SelfAttention
class ScaledDotProductAttention(nn.Module):
def __init__(self, *args, **kwargs):
super().__init__()
self.softmax = nn.Softmax(dim=-2)
def attention_weights(self, query, key, mask):
_, E, _ = que... | 3,269 | 30.142857 | 81 | py |
factorizer | factorizer-master/factorizer/utils/schedulers.py | import math
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
class WarmupCosineSchedule(LambdaLR):
"""Linear warmup and then cosine decay.
Linearly increases learning rate from 0 to 1 over `warmup_steps` training steps.
Decreases learning rate from 1. to 0. over remaining s... | 1,072 | 28.805556 | 84 | py |
factorizer | factorizer-master/factorizer/factorization/tensor_factorizations.py | from typing import Tuple, Union, Optional, Any, Callable, Dict, Sequence
import math
from itertools import permutations
import torch
from torch import nn
import einops
from ..utils.helpers import as_tuple, null_context, prod
from .base import TF
from ..tensor_network import (
TensorNetwork,
SingleTensor,
... | 14,112 | 29.220557 | 89 | py |
factorizer | factorizer-master/factorizer/factorization/base.py | from typing import Any, Dict, Optional, Sequence, Tuple
import math
import torch
from torch import nn
from ..utils.helpers import wrap_class, null_context
from .operations import t, relative_error
from ..tensor_network import TensorNetwork, SingleTensor
class MF(nn.Module):
"""Base module for matrix factorizati... | 7,328 | 29.924051 | 87 | py |
factorizer | factorizer-master/factorizer/factorization/matrix_factorizations.py | from typing import Union, Optional, Any, Callable, Dict, Sequence, Tuple
from abc import ABC
import copy
import math
import random
import torch
from torch import nn
import opt_einsum as oe
from ..utils.helpers import (
as_tuple,
wrap_class,
null_context,
is_wrappable_class,
)
from .operations import t... | 37,441 | 30.974381 | 90 | py |
factorizer | factorizer-master/factorizer/factorization/operations.py | from typing import Union, Optional, Sequence
import torch
from torch import nn
from torch.nn.modules.utils import _ntuple
import einops
from einops.layers.torch import Rearrange
import opt_einsum as oe
def t(x: torch.Tensor) -> torch.Tensor:
"""Transpose a tensor, i.e. "b i j -> b j i"."""
return x.transpose... | 11,344 | 30.513889 | 90 | py |
I-BAU | I-BAU-main/poi_util.py | import numpy as np
import random
import imageio
import torch.nn as nn
def patching(clean_sample, attack, pert=None, dataset_nm = 'CIFAR'):
'''
This code applies a patching procedure to generate backdoored data.
**Please make sure the input sample's label is different from the target label.
clean_sample: ... | 3,714 | 35.782178 | 119 | py |
I-BAU | I-BAU-main/defense.py | import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.optim import Optimizer
import torch.backends.cudnn as cudnn
import torchvision
from torch.utils.data import TensorDataset, DataLoader
import os, logging, sys
import random
import matplotlib.pyplot as plt
import num... | 8,951 | 34.383399 | 132 | py |
I-BAU | I-BAU-main/models.py | import torch
import torch.nn as nn
cfg = {'small_VGG16': [32, 32, 'M', 64, 64, 'M', 128, 128, 'M'],}
drop_rate = [0.3,0.4,0.4]
class VGG(nn.Module):
def __init__(self, vgg_name):
super(VGG, self).__init__()
self.features = self._make_layers(cfg[vgg_name])
self.classifier = nn.Linear(2048, ... | 1,026 | 29.205882 | 79 | py |
I-BAU | I-BAU-main/hypergrad.py | import torch
from itertools import repeat
from typing import List, Callable
from torch import Tensor
from torch.autograd import grad as torch_grad
'''
This code is based on the paper 'On the Iteration Complexity of Hypergradient Computation'.
Source: https://github.com/prolearner/hypertorch/blob/master/hyper... | 5,758 | 38.176871 | 118 | py |
SmaAt-UNet | SmaAt-UNet-master/train_SmaAtUNet.py | from typing import Optional
from models.SmaAt_UNet import SmaAt_UNet
import torch
from torch.utils.data import DataLoader
from torch import optim
from torch import nn
from torchvision import transforms
from root import ROOT_DIR
from utils import dataset_VOC
import time
from tqdm import tqdm
from metric import iou
imp... | 6,411 | 31.221106 | 116 | py |
SmaAt-UNet | SmaAt-UNet-master/test_precip_lightning.py | import torch
from torch import nn
import matplotlib.pyplot as plt
import numpy as np
import os
import pickle
from tqdm import tqdm
import lightning.pytorch as pl
from root import ROOT_DIR
from utils import dataset_precip, model_classes
def get_model_loss(model, test_dl, loss="mse", denormalize=True):
model.eval(... | 5,515 | 35.052288 | 118 | py |
SmaAt-UNet | SmaAt-UNet-master/calc_metrics_test_set.py | import torch
from root import ROOT_DIR
from utils import dataset_precip, model_classes
from tqdm import tqdm
import os
import pickle
import numpy as np
def get_metrics_from_model(model, test_dl, threshold=0.5):
# Precision = tp/(tp+fp)
# Recall = tp/(tp+fn)
# Accuracy = (tp+tn)/(tp+fp+tn+fn)
# F1 = 2... | 3,170 | 35.034091 | 114 | py |
SmaAt-UNet | SmaAt-UNet-master/train_precip_lightning.py | from root import ROOT_DIR
import lightning.pytorch as pl
from lightning.pytorch.callbacks import (
ModelCheckpoint,
LearningRateMonitor,
EarlyStopping,
)
from lightning.pytorch import loggers
import argparse
from models import unet_precip_regression_lightning as unet_regr
def get_batch_size(hparams):
... | 4,032 | 34.377193 | 118 | py |
SmaAt-UNet | SmaAt-UNet-master/metric/confusionmatrix.py | import numpy as np
import torch
from metric import metric
class ConfusionMatrix(metric.Metric):
"""Constructs a confusion matrix for a multi-class classification problems.
Does not support multi-label, multi-class problems.
Keyword arguments:
- num_classes (int): number of classes in the classificatio... | 3,553 | 40.811765 | 119 | py |
SmaAt-UNet | SmaAt-UNet-master/metric/metric.py | class Metric(object):
"""Base class for all metrics.
From: https://github.com/pytorch/tnt/blob/master/torchnet/meter/meter.py
"""
def reset(self):
pass
def add(self):
pass
def value(self):
pass
| 245 | 16.571429 | 76 | py |
SmaAt-UNet | SmaAt-UNet-master/models/SmaAt_UNet.py | from torch import nn
from models.unet_parts import OutConv
from models.unet_parts_depthwise_separable import DoubleConvDS, UpDS, DownDS
from models.layers import CBAM
class SmaAt_UNet(nn.Module):
def __init__(
self,
n_channels,
n_classes,
kernels_per_layer=2,
bilinear=True,... | 2,288 | 38.465517 | 96 | py |
SmaAt-UNet | SmaAt-UNet-master/models/regression_lightning.py | import lightning.pytorch as pl
from torch import nn, optim
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
from utils import dataset_precip
import argparse
import numpy as np
class UNet_base(pl.LightningModule):
@staticmethod
def add_model_specific_args(parent_... | 5,604 | 36.366667 | 95 | py |
SmaAt-UNet | SmaAt-UNet-master/models/unet_parts.py | """ Parts of the U-Net model """
import torch
import torch.nn as nn
import torch.nn.functional as F
class DoubleConv(nn.Module):
"""(convolution => [BN] => ReLU) * 2"""
def __init__(self, in_channels, out_channels, mid_channels=None):
super().__init__()
if not mid_channels:
mid_c... | 2,521 | 33.081081 | 122 | py |
SmaAt-UNet | SmaAt-UNet-master/models/layers.py | import torch
from torch import nn
import torch.nn.functional as F
# Taken from https://discuss.pytorch.org/t/is-there-any-layer-like-tensorflows-space-to-depth-function/3487/14
class DepthToSpace(nn.Module):
def __init__(self, block_size):
super().__init__()
self.bs = block_size
def forward(s... | 5,245 | 35.943662 | 110 | py |
SmaAt-UNet | SmaAt-UNet-master/models/unet_parts_depthwise_separable.py | """ Parts of the U-Net model """
# Base model taken from: https://github.com/milesial/Pytorch-UNet
import torch
import torch.nn as nn
import torch.nn.functional as F
from models.layers import DepthwiseSeparableConv
class DoubleConvDS(nn.Module):
"""(convolution => [BN] => ReLU) * 2"""
def __init__(self, in_c... | 3,225 | 32.957895 | 122 | py |
SmaAt-UNet | SmaAt-UNet-master/utils/dataset_precip.py | from torch.utils.data import Dataset
import h5py
import numpy as np
class precipitation_maps_h5(Dataset):
def __init__(self, in_file, num_input_images, num_output_images, train=True, transform=None):
super(precipitation_maps_h5, self).__init__()
self.file_name = in_file
self.n_images, sel... | 5,044 | 38.414063 | 118 | py |
SmaAt-UNet | SmaAt-UNet-master/utils/model_classes.py | from models import unet_precip_regression_lightning as unet_regr
import lightning.pytorch as pl
from typing import Tuple, Type
def get_model_class(model_file) -> Tuple[Type[pl.LightningModule], str]:
# This is for some nice plotting
if "UNet_Attention" in model_file:
model_name = "UNet Attention"
... | 1,184 | 36.03125 | 72 | py |
SmaAt-UNet | SmaAt-UNet-master/utils/dataset_VOC.py | import random
import torch
from torch.utils.data import Dataset
from PIL import Image
from pathlib import Path
from torchvision import transforms
import torchvision.transforms.functional as TF
import numpy as np
import matplotlib.pyplot as plt
def get_pascal_labels():
"""Load the mapping that associates pascal cl... | 4,992 | 28.544379 | 97 | py |
SmaAt-UNet | SmaAt-UNet-master/utils/data_loader_precip.py | import torch
import numpy as np
from torchvision import transforms
from torch.utils.data.sampler import SubsetRandomSampler
from utils import dataset_precip
# Taken from: https://gist.github.com/kevinzakka/d33bf8d6c7f06a9d8c76d97a7879f5cb
def get_train_valid_loader(
data_dir,
batch_size,
random_seed,
... | 6,979 | 29.884956 | 86 | py |
SEDIM | SEDIM-main/main.py |
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
import keras
from keras import Model
from test import *
from keras.layers import Input, Dense,Dropout
import os
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import KFold,StratifiedKFold
from sklearn.model_selection ... | 4,029 | 30 | 96 | py |
SEDIM | SEDIM-main/imputeByBBO.py |
import scipy
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import csv
from BBO import BBO
import pandas as pd
import scipy.io
import matplotlib as mpl
import numpy as np
import keras
mpl.use('Agg')
from sklearn.metrics import mean_squared_error
from scipy.stats import pearsonr
mpl.use('Agg')
import tensorflow as ... | 13,478 | 45.003413 | 122 | py |
SEDIM | SEDIM-main/multinet_bbo.py | import os
import warnings
import tempfile
import pandas as pd
import numpy as np
from scipy.stats import pearsonr
import tensorflow.keras as keras
from keras import backend as K
from keras.models import Model, model_from_json
from keras.layers import Dense, Dropout, Input,BatchNormalization
from keras.callbacks impor... | 14,337 | 35.858612 | 128 | py |
stablediffusion | stablediffusion-main/setup.py | from setuptools import setup, find_packages
setup(
name='stable-diffusion',
version='0.0.1',
description='',
packages=find_packages(),
install_requires=[
'torch',
'numpy',
'tqdm',
],
) | 233 | 17 | 43 | py |
stablediffusion | stablediffusion-main/scripts/txt2img.py | import argparse, os
import cv2
import torch
import numpy as np
from omegaconf import OmegaConf
from PIL import Image
from tqdm import tqdm, trange
from itertools import islice
from einops import rearrange
from torchvision.utils import make_grid
from pytorch_lightning import seed_everything
from torch import autocast
fr... | 13,795 | 34.465296 | 122 | py |
stablediffusion | stablediffusion-main/scripts/img2img.py | """make variations of input image"""
import argparse, os
import PIL
import torch
import numpy as np
from omegaconf import OmegaConf
from PIL import Image
from tqdm import tqdm, trange
from itertools import islice
from einops import rearrange, repeat
from torchvision.utils import make_grid
from torch import autocast
fr... | 8,700 | 30.075 | 117 | py |
stablediffusion | stablediffusion-main/scripts/gradio/depth2img.py | import sys
import torch
import numpy as np
import gradio as gr
from PIL import Image
from omegaconf import OmegaConf
from einops import repeat, rearrange
from pytorch_lightning import seed_everything
from imwatermark import WatermarkEncoder
from scripts.txt2img import put_watermark
from ldm.util import instantiate_fro... | 7,097 | 37.367568 | 114 | py |
stablediffusion | stablediffusion-main/scripts/gradio/inpainting.py | import sys
import cv2
import torch
import numpy as np
import gradio as gr
from PIL import Image
from omegaconf import OmegaConf
from einops import repeat
from imwatermark import WatermarkEncoder
from pathlib import Path
from ldm.models.diffusion.ddim import DDIMSampler
from ldm.util import instantiate_from_config
to... | 6,419 | 31.755102 | 107 | py |
stablediffusion | stablediffusion-main/scripts/gradio/superresolution.py | import sys
import torch
import numpy as np
import gradio as gr
from PIL import Image
from omegaconf import OmegaConf
from einops import repeat, rearrange
from pytorch_lightning import seed_everything
from imwatermark import WatermarkEncoder
from scripts.txt2img import put_watermark
from ldm.models.diffusion.ddim impor... | 7,438 | 36.570707 | 122 | py |
stablediffusion | stablediffusion-main/scripts/streamlit/depth2img.py | import sys
import torch
import numpy as np
import streamlit as st
from PIL import Image
from omegaconf import OmegaConf
from einops import repeat, rearrange
from pytorch_lightning import seed_everything
from imwatermark import WatermarkEncoder
from scripts.txt2img import put_watermark
from ldm.util import instantiate_... | 6,421 | 39.64557 | 129 | py |
stablediffusion | stablediffusion-main/scripts/streamlit/inpainting.py | import sys
import cv2
import torch
import numpy as np
import streamlit as st
from PIL import Image
from omegaconf import OmegaConf
from einops import repeat
from streamlit_drawable_canvas import st_canvas
from imwatermark import WatermarkEncoder
from ldm.models.diffusion.ddim import DDIMSampler
from ldm.util import in... | 6,814 | 33.948718 | 116 | py |
stablediffusion | stablediffusion-main/scripts/streamlit/stableunclip.py | import importlib
import streamlit as st
import torch
import cv2
import numpy as np
import PIL
from omegaconf import OmegaConf
from PIL import Image
from tqdm import trange
import io, os
from torch import autocast
from einops import rearrange, repeat
from torchvision.utils import make_grid
from pytorch_lightning import ... | 16,529 | 38.640288 | 120 | py |
stablediffusion | stablediffusion-main/scripts/streamlit/superresolution.py | import sys
import torch
import numpy as np
import streamlit as st
from PIL import Image
from omegaconf import OmegaConf
from einops import repeat, rearrange
from pytorch_lightning import seed_everything
from imwatermark import WatermarkEncoder
from scripts.txt2img import put_watermark
from ldm.models.diffusion.ddim im... | 6,893 | 39.315789 | 128 | py |
stablediffusion | stablediffusion-main/ldm/util.py | import importlib
import torch
from torch import optim
import numpy as np
from inspect import isfunction
from PIL import Image, ImageDraw, ImageFont
def autocast(f):
def do_autocast(*args, **kwargs):
with torch.cuda.amp.autocast(enabled=True,
dtype=torch.get_autocast_... | 7,559 | 35.521739 | 119 | py |
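
The preview above ends here. As a usage aid, the sketch below shows how a dataset with this schema could be loaded and queried with the Hugging Face `datasets` library. The dataset identifier is a placeholder, and the column definitions checked in the middle block (`file_length` as total characters, `avg_line_length` as characters per line) are assumptions inferred from the column names and value ranges, not documented behavior.

```python
# A minimal sketch, assuming a hypothetical dataset ID and inferred column
# semantics; adjust both to match the actual dataset card.
from datasets import load_dataset

ds = load_dataset("user/python-code-corpus", split="train")  # placeholder ID

# Each record mirrors the columns in the header above.
row = ds[0]
print(row["repo"], row["file"], row["extension_type"])

# Cross-check the derived columns against the raw code string
# (assumed definitions, inferred from the preview, not confirmed).
lines = row["code"].splitlines()
print(len(row["code"]), "vs", row["file_length"])                   # total characters
print(max(len(l) for l in lines), "vs", row["max_line_length"])     # longest line
print(len(row["code"]) / len(lines), "vs", row["avg_line_length"])  # mean line length

# Example query: short files from a single repository.
small = ds.filter(lambda r: r["repo"] == "const_layout" and r["file_length"] < 10_000)
print(len(small), "matching files")
```

Because `extension_type` has a single class (`py`), filtering on it is a no-op here; repository- and length-based filters like the one above are the useful axes for slicing.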