| column | dtype | observed range |
| --- | --- | --- |
| repo | string | lengths 2–99 |
| file | string | lengths 13–225 |
| code | string | lengths 0–18.3M |
| file_length | int64 | 0–18.3M |
| avg_line_length | float64 | 0–1.36M |
| max_line_length | int64 | 0–4.26M |
| extension_type | string (categorical) | 1 distinct value |
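Each record describes one source file: the repository it comes from, its path, the raw file contents, three size statistics, and the file extension (constant at `py` in this preview). As a minimal sketch of how such a table might be loaded and queried with pandas, assuming the rows have been exported to a Parquet file (the filename below is hypothetical, not part of this dataset):

```python
import pandas as pd

# Hypothetical export of the rows below; any tabular format carrying
# these seven columns would be handled the same way.
df = pd.read_parquet("python_source_files.parquet")

columns = ["repo", "file", "code", "file_length",
           "avg_line_length", "max_line_length", "extension_type"]
assert list(df.columns) == columns

# Example query: the largest non-empty files in the "fitclip" repo.
largest = (df[(df["repo"] == "fitclip") & (df["file_length"] > 0)]
           .sort_values("file_length", ascending=False)
           .head(5))
print(largest[["file", "file_length", "avg_line_length", "max_line_length"]])
```

The table below lists every row shown in this preview; the `code` column is a truncated excerpt of each file's contents.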
| repo | file | code | file_length | avg_line_length | max_line_length | extension_type |
| --- | --- | --- | --- | --- | --- | --- |
| fitclip | fitclip-main/aligner/__init__.py | | 0 | 0 | 0 | py |
| fitclip | fitclip-main/aligner/transforms.py | """From https://github.com/pytorch/vision/blob/993325d/references/video_classification/transforms.py""" import random from typing import Any import torch import torch.nn as nn from overrides import overrides from torchvision.transforms import InterpolationMode, RandomResizedCrop, functional as F from util.tensor_util... | 2,123 | 33.258065 | 103 | py |
| fitclip | fitclip-main/aligner/tests/__init__.py | | 0 | 0 | 0 | py |
| fitclip | fitclip-main/aligner/tests/video_dataset_test.py | import decord import numpy as np from cached_path import cached_path from aligner.data.video_dataset import time_to_indices def test_seek() -> None: # noinspection SpellCheckingInspection video_reader = decord.VideoReader(cached_path("https://mdn.github.io/learning-area/html/multimedia-and-embedding/" ... | 831 | 40.6 | 118 | py |
| fitclip | fitclip-main/aligner/tests/data/multi_source_sampler_test.py | import string from typing import Literal from torch.utils.data import ConcatDataset, DataLoader, SequentialSampler from aligner.data.multi_source_sampler import RoundRobinMultiSourceSampler def _create_sample_data_loader(mode: Literal["min_size", "max_size_cycle"]) -> DataLoader: dataset1 = string.ascii_lowerca... | 1,463 | 42.058824 | 117 | py |
| fitclip | fitclip-main/aligner/tests/data/__init__.py | | 0 | 0 | 0 | py |
| fitclip | fitclip-main/aligner/encoder/videoclip_video_text_encoder.py | import os from typing import Iterable, Iterator, Optional import torch from overrides import overrides from torchvision import transforms as T from transformers import AutoTokenizer from aligner.data.frame_sampler import ConsecutiveFrameSampler, FrameSampler from aligner.encoder.s3dg import S3DG from aligner.encoder.... | 4,268 | 39.657143 | 120 | py |
| fitclip | fitclip-main/aligner/encoder/mil_nce_video_text_encoder.py | import re from typing import Any, Iterable, Iterator, Mapping, Optional, Union import numpy as np import torch from cached_path import cached_path from overrides import overrides from torch import nn from torchvision import transforms as T from aligner.data.frame_sampler import ConsecutiveFrameSampler, FrameSampler f... | 8,365 | 41.040201 | 120 | py |
| fitclip | fitclip-main/aligner/encoder/video_text_encoder.py | from abc import abstractmethod from typing import Callable, Iterable, Iterator, Mapping, Tuple import torch from overrides import overrides from aligner.encoder.video_encoder import TYPE_VIDEO_INPUT, VideoEncoder TYPE_TEXT_INPUT = Mapping[str, torch.Tensor] TYPE_OUTPUT = Tuple[torch.Tensor, torch.Tensor] TYPE_TOKEN... | 994 | 30.09375 | 85 | py |
| fitclip | fitclip-main/aligner/encoder/video_encoder.py | from abc import abstractmethod from typing import Callable, Optional, Tuple import torch from overrides import overrides from torch import nn from aligner.data.frame_sampler import FrameSampler TYPE_VIDEO_INPUT = torch.Tensor TYPE_TRANSFORM = Callable[[torch.Tensor], torch.Tensor] class VideoEncoder(nn.Module): ... | 2,121 | 32.15625 | 107 | py |
| fitclip | fitclip-main/aligner/encoder/slip.py | # Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # Copied from https://github.com/facebookresearch/SLIP/tree/c6faf5d import gzip import html from collections import OrderedDict from functools import lru_cache from typing import Iterable, Iterator import ftfy import numpy as np import regex ... | 22,773 | 34.640063 | 120 | py |
| fitclip | fitclip-main/aligner/encoder/s3dg.py | # Initially copied from the MIL-NCE repo. """Contains the definition for Gated Separable 3D network (S3D-G). """ from typing import Literal, Tuple import torch from overrides import overrides from torch import nn from torch.nn.common_types import _size_3_t, _size_6_t class InceptionBlock(nn.Module): def __init__... | 9,814 | 43.817352 | 118 | py |
| fitclip | fitclip-main/aligner/encoder/frozen_in_time.py | # Originally from https://github.com/m-bain/frozen-in-time/blob/ba54e43/model/model.py import logging import sys from typing import Any, Dict, Literal, Mapping, MutableMapping, Optional, Tuple, Union import numpy as np import timm import torch import torch.nn as nn import torch.nn.functional as F from cached_path impo... | 9,799 | 49.515464 | 119 | py |
| fitclip | fitclip-main/aligner/encoder/frozen_in_time_video_text_encoder.py | import os from typing import Iterable, Iterator import torch from overrides import overrides from torchvision import transforms as T from transformers import AutoTokenizer from aligner.data.frame_sampler import FrameSampler, RandomFromUniformIntervalsFrameSampler, UniformFrameSampler from aligner.encoder.frozen_in_ti... | 3,686 | 37.810526 | 118 | py |
| fitclip | fitclip-main/aligner/encoder/videoclip.py | import torch import torch.utils.checkpoint from torch import nn from transformers import AutoConfig, BertModel, BertPreTrainedModel from transformers.activations import ACT2FN from transformers.models.bert.modeling_bert import BertEmbeddings, BertEncoder class VideoTokenMLP(nn.Module): def __init__(self, config):... | 28,028 | 38.256303 | 119 | py |
| fitclip | fitclip-main/aligner/encoder/frozen_in_time_stub.py | # To make pickle work with a missing module and class. See https://stackoverflow.com/a/2121918/1165181 class ConfigParser: pass | 132 | 32.25 | 102 | py |
| fitclip | fitclip-main/aligner/encoder/__init__.py | | 0 | 0 | 0 | py |
| fitclip | fitclip-main/aligner/encoder/video_transformer.py | # From https://github.com/m-bain/frozen-in-time/blob/ba54e43/model/video_transformer.py """ Implementations of Video Transformers in PyTorch A PyTorch implementation of space-time transformer as described in 'Frozen in Time: A Joint Image and Video Encoder for End-to-End Retrieval' - https://arxiv.org/abs/2104.00650 ... | 14,253 | 40.800587 | 120 | py |
| fitclip | fitclip-main/aligner/encoder/clip_video_text_encoder.py | import os.path import shutil import tempfile from typing import Iterable, Iterator, Tuple import torch from cached_path import cached_path from clip import clip from clip.model import CLIP from overrides import overrides from torch import nn from torchvision import transforms as T from aligner.data.frame_sampler impo... | 6,022 | 39.972789 | 120 | py |
| fitclip | fitclip-main/aligner/encoder/slip_video_text_encoder.py | from typing import Iterable, Iterator, Union import torch from cached_path import cached_path from overrides import overrides from torchvision import transforms as T from aligner.data.frame_sampler import FrameSampler, UniformFrameSampler from aligner.encoder import slip from aligner.encoder.slip import CLIP, SLIP, S... | 3,736 | 36.37 | 115 | py |
| fitclip | fitclip-main/aligner/data/youcook2.py | import os from glob import iglob from typing import Optional, Tuple import pandas as pd from cached_path import cached_path from overrides import overrides from torch.utils.data import DataLoader from aligner.data.video_data_module import VideoTextDataModule from aligner.data.video_text_dataset import VideoTextDatase... | 2,183 | 41 | 116 | py |
| fitclip | fitclip-main/aligner/data/moments_in_time.py | import functools import os from typing import Mapping, Tuple import pandas as pd from cached_path import cached_path from overrides import overrides from torch.utils.data import DataLoader from aligner.data.video_data_module import VideoClassificationDataModule from aligner.data.video_dataset import VideoDataset from... | 2,916 | 43.19697 | 116 | py |
| fitclip | fitclip-main/aligner/data/frame_sampler.py | import itertools from abc import ABC, abstractmethod from typing import Optional, Sequence import torch from overrides import overrides from util.iter_utils import pairwise from util.video_utils import resample class FrameSampler(ABC): """Returns the frame indices to seek for the given clip start and end frame ... | 3,069 | 38.87013 | 109 | py |
| fitclip | fitclip-main/aligner/data/video_reader.py | import logging from abc import ABC, abstractmethod from typing import Sequence, Union import PIL import decord import numpy as np import torch import torchvision.datasets import torchvision.transforms.functional from overrides import overrides from util.typing_utils import TYPE_PATH LOGGER = logging.getLogger(__name... | 4,118 | 33.90678 | 120 | py |
| fitclip | fitclip-main/aligner/data/data_module_group.py | import bisect from abc import ABC from typing import Any, Callable, Iterable, Mapping, Optional, Sequence, Union import pytorch_lightning as pl from overrides import overrides from pytorch_lightning import Trainer from pytorch_lightning.trainer.states import RunningStage from pytorch_lightning.utilities.apply_func imp... | 10,134 | 47.492823 | 120 | py |
| fitclip | fitclip-main/aligner/data/multi_source_sampler.py | import itertools import math import sys from typing import Generic, Iterable, Iterator, Literal, TypeVar, Union from torch.utils.data import Sampler T_co = TypeVar("T_co", covariant=True) # We don't use `CycleIterator` from PyTorch Lightning because when used along with `itertools.islice`, # it always creates a new... | 4,191 | 38.92381 | 119 | py |
| fitclip | fitclip-main/aligner/data/video_data_module.py | import multiprocessing from abc import ABC, abstractmethod from typing import Any, Iterable, Mapping, MutableMapping, Optional, Union import pytorch_lightning as pl import torch.cuda from overrides import overrides from pytorch_lightning.utilities.apply_func import apply_to_collection from torch.utils.data import Data... | 4,101 | 44.577778 | 114 | py |
| fitclip | fitclip-main/aligner/data/video_text_dataset.py | from abc import ABC from typing import Mapping, Union from torch.utils.data.dataloader import default_collate from aligner.data.tokenizer_collate import MappingTokenizerCollate from aligner.data.video_dataset import VideoDataset from aligner.encoder.video_text_encoder import TYPE_TOKENIZER class VideoTextDataset(Vi... | 744 | 42.823529 | 117 | py |
| fitclip | fitclip-main/aligner/data/hmdb.py | import functools import glob import os from typing import Iterable, Literal, Mapping, Optional, Tuple from cached_path import cached_path from overrides import overrides from torch.utils.data import DataLoader from aligner.data.ucf import UCF_101_TEMPLATES from aligner.data.video_data_module import VideoClassificatio... | 3,441 | 39.023256 | 116 | py |
| fitclip | fitclip-main/aligner/data/webvid.py | import os import pandas as pd from cached_path import cached_path from overrides import overrides from torch.utils.data import DataLoader from aligner.data.video_data_module import VideoTextDataModule from aligner.data.video_dataset import VideoDataset from aligner.data.video_text_dataset import VideoTextDataset from... | 3,814 | 49.197368 | 120 | py |
| fitclip | fitclip-main/aligner/data/video_dataset.py | import collections.abc import functools import logging import os from abc import ABC, abstractmethod from typing import Any, Generic, Iterable, Mapping, Optional, Sequence, Tuple, TypeVar, Union import torch from overrides import overrides from torch.nn.utils.rnn import pad_sequence from torch.utils.data import Datase... | 5,261 | 43.59322 | 120 | py |
| fitclip | fitclip-main/aligner/data/__init__.py | | 0 | 0 | 0 | py |
| fitclip | fitclip-main/aligner/data/tokenizer_collate.py | import collections.abc from abc import ABC, abstractmethod from typing import Any, Callable, Iterable, Mapping, Sequence, Tuple, Union from overrides import overrides from pytorch_lightning.utilities.apply_func import apply_to_collection from torch.utils.data.dataloader import default_collate from aligner.encoder.vid... | 4,865 | 53.066667 | 120 | py |
| fitclip | fitclip-main/aligner/data/kinetics.py | import os from typing import Iterable, Mapping, Optional, Tuple import pandas as pd from cached_path import cached_path from overrides import overrides from torch.utils.data import DataLoader from aligner.data.video_data_module import VideoClassificationDataModule from aligner.data.video_dataset import VideoDataset f... | 6,115 | 49.131148 | 116 | py |
| fitclip | fitclip-main/aligner/data/msrvtt.py | import json import os import random from typing import Literal import pandas as pd from cached_path import cached_path from overrides import overrides from torch.utils.data import DataLoader from aligner.data.video_data_module import VideoTextDataModule from aligner.data.video_dataset import VideoDataset from aligner... | 3,743 | 45.8 | 119 | py |
| fitclip | fitclip-main/aligner/data/didemo.py | import json import os from collections import defaultdict from cached_path import CACHE_DIR, _find_latest_cached, cached_path from overrides import overrides from torch.utils.data import DataLoader from aligner.data.video_data_module import VideoTextDataModule from aligner.data.video_text_dataset import VideoTextData... | 3,269 | 47.088235 | 119 | py |
| fitclip | fitclip-main/aligner/data/conceptual_captions.py | import functools import os import pandas as pd from cached_path import cached_path from overrides import overrides from torch.utils.data import DataLoader from torchvision.datasets.folder import IMG_EXTENSIONS from aligner.data.video_data_module import VideoTextDataModule from aligner.data.video_dataset import VideoD... | 3,241 | 48.121212 | 116 | py |
| fitclip | fitclip-main/aligner/data/ucf.py | import functools import os import re from typing import Iterable, Mapping, Optional, Tuple from cached_path import cached_path from overrides import overrides from torch.utils.data import DataLoader from aligner.data.video_data_module import VideoClassificationDataModule from aligner.data.video_dataset import VideoDa... | 5,400 | 40.229008 | 116 | py |
| VQ-Diffusion | VQ-Diffusion-main/inference_VQ_Diffusion.py | # ------------------------------------------ # VQ-Diffusion # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. # written By Shuyang Gu # ------------------------------------------ import os import sys sys.path.append(os.path.join(os.path.dirname(__file__), '../')) import torch import cv2 import ... | 9,903 | 48.029703 | 239 | py |
| VQ-Diffusion | VQ-Diffusion-main/train.py | # ------------------------------------------ # VQ-Diffusion # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. # written By Shuyang Gu # ------------------------------------------ import argparse import os import warnings import time import torch from image_synthesis.modeling.build import build_m... | 6,809 | 39.058824 | 138 | py |
| VQ-Diffusion | VQ-Diffusion-main/image_synthesis/distributed/launch.py | import os import torch from torch import distributed as dist from torch import multiprocessing as mp # import distributed as dist_fn import image_synthesis.distributed.distributed as dist_fn def find_free_port(): import socket sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.bind(("", 0)) ... | 2,604 | 26.712766 | 101 | py |
| VQ-Diffusion | VQ-Diffusion-main/image_synthesis/distributed/distributed.py | import math import pickle import torch from torch import distributed as dist from torch.utils import data LOCAL_PROCESS_GROUP = None def is_primary(): return get_rank() == 0 def get_rank(): if not dist.is_available(): return 0 if not dist.is_initialized(): return 0 return dist.g... | 3,169 | 20.418919 | 76 | py |
| VQ-Diffusion | VQ-Diffusion-main/image_synthesis/engine/lr_scheduler.py | import torch import math # from torch.optim import AdamW, Adam from torch._six import inf from torch.optim.optimizer import Optimizer from torch.optim.lr_scheduler import _LRScheduler, CosineAnnealingLR class ReduceLROnPlateauWithWarmup(object): """Reduce learning rate when a metric has stopped improving. Mo... | 11,992 | 40.071918 | 128 | py |
| VQ-Diffusion | VQ-Diffusion-main/image_synthesis/engine/clip_grad_norm.py | from torch.nn.utils import clip_grad_norm_ class ClipGradNorm(object): def __init__(self, start_iteration=0, end_iteration=-1, # if negative, the norm will be always clipped max_norm=0.5): self.start_iteration = start_iteration self.end_iteratio... | 935 | 29.193548 | 81 | py |
| VQ-Diffusion | VQ-Diffusion-main/image_synthesis/engine/logger.py | from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import time import sys import torch from image_synthesis.utils.io import write_args, save_config_to_yaml from image_synthesis.distributed.distributed import is_primary import torch.utils.tensorboard a... | 3,005 | 32.4 | 132 | py |
| VQ-Diffusion | VQ-Diffusion-main/image_synthesis/engine/solver.py | # ------------------------------------------ # VQ-Diffusion # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. # written By Shuyang Gu # ------------------------------------------ import os import time import math import torch import threading import multiprocessing import copy from PIL import Im... | 26,443 | 47.08 | 204 | py |
| VQ-Diffusion | VQ-Diffusion-main/image_synthesis/engine/ema.py | import torch import copy class EMA(object): def __init__(self, model, decay=0.99, update_interval=1, device=torch.device('cpu')): self.decay = decay self.update_iterval = update_interval self.device = device se... | 2,968 | 42.028986 | 127 | py |
| VQ-Diffusion | VQ-Diffusion-main/image_synthesis/utils/misc.py | import importlib import random import numpy as np import torch import warnings import os def seed_everything(seed, cudnn_deterministic=False): """ Function that sets seed for pseudo-random number generators in: pytorch, numpy, python.random Args: seed: the integer value seed for global ra... | 5,066 | 29.160714 | 119 | py |
| VQ-Diffusion | VQ-Diffusion-main/image_synthesis/utils/io.py | import sys import yaml import torch import json def load_yaml_config(path): with open(path) as f: config = yaml.full_load(f) return config def save_config_to_yaml(config, path): assert path.endswith('.yaml') with open(path, 'w') as f: f.write(yaml.dump(config)) f.close() def ... | 1,067 | 28.666667 | 98 | py |
| VQ-Diffusion | VQ-Diffusion-main/image_synthesis/data/cub200_dataset.py | from torch.utils.data import Dataset import numpy as np import io from PIL import Image import os import json import random from image_synthesis.utils.misc import instantiate_from_config from tqdm import tqdm import pickle def load_img(filepath): img = Image.open(filepath).convert('RGB') return img class Cub2... | 2,040 | 33.016667 | 129 | py |
| VQ-Diffusion | VQ-Diffusion-main/image_synthesis/data/mscoco_dataset.py | from torch.utils.data import Dataset import numpy as np import io from PIL import Image import os import json import random from image_synthesis.utils.misc import instantiate_from_config def load_img(filepath): img = Image.open(filepath).convert('RGB') return img class CocoDataset(Dataset): def __init__(s... | 1,873 | 36.48 | 129 | py |
| VQ-Diffusion | VQ-Diffusion-main/image_synthesis/data/imagenet_dataset.py | from torch.utils.data import Dataset import numpy as np import io from PIL import Image import os import json import random from image_synthesis.utils.misc import instantiate_from_config def load_img(filepath): img = Image.open(filepath).convert('RGB') return img class ImageNetDataset(Dataset): def __init... | 2,016 | 33.775862 | 132 | py |
| VQ-Diffusion | VQ-Diffusion-main/image_synthesis/data/ffhq_dataset.py | from torch.utils.data import Dataset import numpy as np import io from PIL import Image import os import json import random from image_synthesis.utils.misc import instantiate_from_config import torchvision.datasets as datasets class FFHQDataset(datasets.ImageFolder): def __init__(self, data_root, im_preprocessor_c... | 848 | 31.653846 | 86 | py |
| VQ-Diffusion | VQ-Diffusion-main/image_synthesis/data/build.py | import torch # from image_synthesis.data.base_dataset import ConcatDatasetWithIndex as ConcatDataset from torch.utils.data import ConcatDataset from image_synthesis.utils.misc import instantiate_from_config from image_synthesis.distributed.distributed import is_distributed def build_dataloader(config, args=None, retur... | 3,454 | 44.460526 | 100 | py |
| VQ-Diffusion | VQ-Diffusion-main/image_synthesis/data/utils/image_preprocessor.py | import albumentations import random import numpy as np from PIL import Image import cv2 from io import BytesIO from torchvision import transforms as trans class DalleTransformerPreprocessor(object): def __init__(self, size=256, phase='train', additional_targets=N... | 3,890 | 35.364486 | 140 | py |
| VQ-Diffusion | VQ-Diffusion-main/image_synthesis/data/utils/comm.py | """ This file contains primitives for multi-gpu communication. This is useful when doing distributed training. """ import pickle import torch import torch.distributed as dist # from diffdist.functional import all_gather as better_all_gather class Comm(object): def __init__(self, local_rank=0): self.loca... | 6,860 | 28.701299 | 103 | py |
| VQ-Diffusion | VQ-Diffusion-main/image_synthesis/data/utils/manage.py | from sys import stdout import zipfile import os.path as osp import lmdb import logging from PIL import Image import pickle import io import glob import os from pathlib import Path import time from threading import Thread from queue import Queue,Empty import subprocess def func_wrapper(func): def sub_func(queue,kwa... | 11,184 | 25.630952 | 122 | py |
| VQ-Diffusion | VQ-Diffusion-main/image_synthesis/modeling/build.py | from image_synthesis.utils.misc import instantiate_from_config def build_model(config, args=None): return instantiate_from_config(config['model']) | 153 | 24.666667 | 62 | py |
| VQ-Diffusion | VQ-Diffusion-main/image_synthesis/modeling/modules/clip/simple_tokenizer.py | import gzip import html import os from functools import lru_cache import ftfy import regex as re @lru_cache() def default_bpe(): return os.path.join(os.path.dirname(os.path.abspath(__file__)), "bpe_simple_vocab_16e6.txt.gz") @lru_cache() def bytes_to_unicode(): """ Returns list of utf-8 byte and a corr... | 4,806 | 34.087591 | 144 | py |
| VQ-Diffusion | VQ-Diffusion-main/image_synthesis/modeling/modules/clip/clip.py | import hashlib import os import urllib import warnings from typing import Union, List import torch from PIL import Image from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize from tqdm import tqdm from .model import build_model from .simple_tokenizer import SimpleTokenizer as _Tokenizer ... | 7,962 | 35.360731 | 142 | py |
| VQ-Diffusion | VQ-Diffusion-main/image_synthesis/modeling/modules/clip/model.py | from collections import OrderedDict from typing import Tuple, Union import torch import torch.nn.functional as F from torch import nn class Bottleneck(nn.Module): expansion = 4 def __init__(self, inplanes, planes, stride=1): super().__init__() # all conv layers have stride 1. an avgpool is ... | 17,333 | 38.848276 | 178 | py |
| VQ-Diffusion | VQ-Diffusion-main/image_synthesis/modeling/modules/clip/clip_tokenizer.py | import gzip import html import os from functools import lru_cache import ftfy import regex as re @lru_cache() def default_bpe(): return os.path.join(os.path.dirname(os.path.abspath(__file__)), "bpe_simple_vocab_16e6.txt.gz") @lru_cache() def bytes_to_unicode(): """ Returns list of utf-8 byte and a corr... | 4,806 | 34.087591 | 144 | py |
| VQ-Diffusion | VQ-Diffusion-main/image_synthesis/modeling/modules/clip/__init__.py | from .clip import * | 20 | 9.5 | 19 | py |
| VQ-Diffusion | VQ-Diffusion-main/image_synthesis/modeling/codecs/base_codec.py | import torch from torch import nn class BaseCodec(nn.Module): def get_tokens(self, x, **kwargs): """ Input: x: input data Return: indices: B x L, the codebook indices, where L is the length of flattened feature map size """ ... | 1,046 | 23.348837 | 72 | py |
| VQ-Diffusion | VQ-Diffusion-main/image_synthesis/modeling/codecs/image_codec/taming_gumbel_vqvae.py | import torch import torch.nn as nn from omegaconf import OmegaConf import sys sys.path.append("..") # sys.path.append("../image_synthesis") from image_synthesis.utils.misc import instantiate_from_config from image_synthesis.taming.models.vqgan import GumbelVQ, VQModel from image_synthesis.taming.models.cond_transformer... | 10,011 | 33.885017 | 112 | py |
| VQ-Diffusion | VQ-Diffusion-main/image_synthesis/modeling/codecs/image_codec/patch_vqgan.py | from numpy.core.shape_base import block from numpy.lib import stride_tricks import torch import numpy as np import torch.nn as nn import torch.nn.functional as F import random from torch.nn.modules.linear import Linear from image_synthesis.utils.misc import instantiate_from_config from image_synthesis.modeling.codecs.... | 35,439 | 38.116998 | 147 | py |
| VQ-Diffusion | VQ-Diffusion-main/image_synthesis/modeling/codecs/image_codec/ema_vqvae.py | import torch import torch.nn as nn from omegaconf import OmegaConf import sys sys.path.append("..") # sys.path.append("../image_synthesis") import os import torchvision.transforms.functional as TF import PIL from image_synthesis.modeling.codecs.base_codec import BaseCodec from einops import rearrange import math import... | 4,083 | 29.706767 | 112 | py |
| VQ-Diffusion | VQ-Diffusion-main/image_synthesis/modeling/codecs/text_codec/tokenize.py | import torch import torch.nn as nn from image_synthesis.modeling.modules.clip.clip import tokenize from image_synthesis.modeling.codecs.base_codec import BaseCodec from image_synthesis.utils.misc import instantiate_from_config class Tokenize(BaseCodec): def __init__(self, context_length:int = 256, ... | 3,124 | 36.202381 | 104 | py |
| VQ-Diffusion | VQ-Diffusion-main/image_synthesis/modeling/models/conditional_dalle.py | # ------------------------------------------ # VQ-Diffusion # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. # written By Shuyang Gu # ------------------------------------------ import torch import math from torch import nn from image_synthesis.utils.misc import instantiate_from_config import t... | 11,968 | 40.559028 | 154 | py |
| VQ-Diffusion | VQ-Diffusion-main/image_synthesis/modeling/models/unconditional_dalle.py | # ------------------------------------------ # VQ-Diffusion # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. # written By Shuyang Gu # ------------------------------------------ import torch import math from torch import nn from image_synthesis.utils.misc import instantiate_from_config import t... | 8,216 | 35.52 | 138 | py |
| VQ-Diffusion | VQ-Diffusion-main/image_synthesis/modeling/models/dalle.py | # ------------------------------------------ # VQ-Diffusion # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. # written By Shuyang Gu # ------------------------------------------ import torch import math from torch import nn from image_synthesis.utils.misc import instantiate_from_config import t... | 14,512 | 43.246951 | 154 | py |
| VQ-Diffusion | VQ-Diffusion-main/image_synthesis/modeling/embeddings/class_embedding.py | import torch import torch.nn as nn from .base_embedding import BaseEmbedding class ClassEmbedding(BaseEmbedding): def __init__(self, num_embed=1000, embed_dim=512, identity=False, trainable=True, ): super().__init__() self... | 899 | 26.272727 | 74 | py |
| VQ-Diffusion | VQ-Diffusion-main/image_synthesis/modeling/embeddings/dalle_mask_image_embedding.py | import torch import torch.nn as nn from .base_embedding import BaseEmbedding class DalleMaskImageEmbedding(BaseEmbedding): def __init__(self, num_embed=8192, spatial_size=[32, 32], # height and with embed_dim=3968, trainable=True, ... | 2,507 | 42.241379 | 173 | py |
| VQ-Diffusion | VQ-Diffusion-main/image_synthesis/modeling/embeddings/base_embedding.py | import torch from torch import nn class BaseEmbedding(nn.Module): def get_loss(self): return None def forward(self, **kwargs): raise NotImplementedError def train(self, mode=True): self.training = mode if self.trainable and mode: super().train() retur... | 507 | 19.32 | 49 | py |
| VQ-Diffusion | VQ-Diffusion-main/image_synthesis/modeling/embeddings/clip_text_embedding.py | import torch import torch.nn as nn from image_synthesis.modeling.modules.clip import clip from image_synthesis.modeling.modules.clip import model as clip_model from .base_embedding import BaseEmbedding class CLIPTextEmbedding(BaseEmbedding): def __init__(self, clip_name='ViT-B/32', ... | 3,423 | 37.47191 | 121 | py |
| VQ-Diffusion | VQ-Diffusion-main/image_synthesis/modeling/utils/misc.py | from numpy.core.fromnumeric import resize from numpy.lib.function_base import kaiser from numpy.lib.npyio import save import torch import random import math from image_synthesis.distributed.distributed import all_reduce, get_world_size def logits_top_k(logits, filter_ratio = 0.5, minimum=1, pad_value=None): logits... | 5,282 | 32.01875 | 114 | py |
| VQ-Diffusion | VQ-Diffusion-main/image_synthesis/modeling/transformers/diffusion_transformer.py | # ------------------------------------------ # VQ-Diffusion # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. # written By Shuyang Gu # ------------------------------------------ import math import torch from torch import nn import torch.nn.functional as F from image_synthesis.utils.misc import... | 29,919 | 42.678832 | 166 | py |
| VQ-Diffusion | VQ-Diffusion-main/image_synthesis/modeling/transformers/transformer_utils.py | # ------------------------------------------ # VQ-Diffusion # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. # written By Shuyang Gu # ------------------------------------------ import math import torch from torch import nn import torch.nn.functional as F from image_synthesis.utils.misc import... | 30,407 | 41 | 131 | py |
| VQ-Diffusion | VQ-Diffusion-main/image_synthesis/taming/lr_scheduler.py | import numpy as np class LambdaWarmUpCosineScheduler: """ note: use with a base_lr of 1.0 """ def __init__(self, warm_up_steps, lr_min, lr_max, lr_start, max_decay_steps, verbosity_interval=0): self.lr_warm_up_steps = warm_up_steps self.lr_start = lr_start self.lr_min = lr_min ... | 1,205 | 33.457143 | 114 | py |
| VQ-Diffusion | VQ-Diffusion-main/image_synthesis/taming/util.py | import os, hashlib import requests from tqdm import tqdm URL_MAP = { "vgg_lpips": "https://heibox.uni-heidelberg.de/f/607503859c864bc1b30b/?dl=1" } CKPT_MAP = { "vgg_lpips": "vgg.pth" } MD5_MAP = { "vgg_lpips": "d507d7349b931f0638a25a48a722f98a" } def download(url, local_path, chunk_size=1024): os.... | 4,777 | 29.240506 | 85 | py |
| VQ-Diffusion | VQ-Diffusion-main/image_synthesis/taming/modules/util.py | import torch import torch.nn as nn def count_params(model): total_params = sum(p.numel() for p in model.parameters()) return total_params class ActNorm(nn.Module): def __init__(self, num_features, logdet=False, affine=True, allow_reverse_init=False): assert affine super(... | 3,847 | 28.374046 | 85 | py |
| VQ-Diffusion | VQ-Diffusion-main/image_synthesis/taming/modules/vqvae/quantize.py | import torch import torch.nn as nn import torch.nn.functional as F import numpy as np from torch import einsum from einops import rearrange class VectorQuantizer(nn.Module): """ see https://github.com/MishaLaskin/vqvae/blob/d761a999e2267766400dc646d82d3ac3657771d4/models/quantizer.py _____________________... | 13,259 | 39.181818 | 110 | py |
| VQ-Diffusion | VQ-Diffusion-main/image_synthesis/taming/modules/discriminator/model.py | import functools import torch.nn as nn from image_synthesis.taming.modules.util import ActNorm def weights_init(m): classname = m.__class__.__name__ if classname.find('Conv') != -1: nn.init.normal_(m.weight.data, 0.0, 0.02) elif classname.find('BatchNorm') != -1: nn.init.normal_(m.weight... | 2,566 | 36.75 | 116 | py |
| VQ-Diffusion | VQ-Diffusion-main/image_synthesis/taming/modules/misc/coord.py | import torch class CoordStage(object): def __init__(self, n_embed, down_factor): self.n_embed = n_embed self.down_factor = down_factor def eval(self): return self def encode(self, c): """fake vqmodel interface""" assert 0.0 <= c.min() and c.max() <= 1.0 b,c... | 904 | 27.28125 | 79 | py |
| VQ-Diffusion | VQ-Diffusion-main/image_synthesis/taming/modules/diffusionmodules/model.py | # pytorch_diffusion + derived encoder decoder import math import torch import torch.nn as nn import numpy as np def get_timestep_embedding(timesteps, embedding_dim): """ This matches the implementation in Denoising Diffusion Probabilistic Models: From Fairseq. Build sinusoidal embeddings. This mat... | 30,221 | 37.895753 | 121 | py |
| VQ-Diffusion | VQ-Diffusion-main/image_synthesis/taming/modules/transformer/mingpt.py | """ taken from: https://github.com/karpathy/minGPT/ GPT model: - the initial stem consists of a combination of token encoding and a positional encoding - the meat of it is a uniform sequence of Transformer blocks - each Transformer is a sequential combination of a 1-hidden-layer MLP block and a self-attention block... | 15,743 | 40.10705 | 140 | py |
| VQ-Diffusion | VQ-Diffusion-main/image_synthesis/taming/modules/transformer/permuter.py | import torch import torch.nn as nn import numpy as np class AbstractPermuter(nn.Module): def __init__(self, *args, **kwargs): super().__init__() def forward(self, x, reverse=False): raise NotImplementedError class Identity(AbstractPermuter): def __init__(self): super().__init__()... | 7,093 | 27.48996 | 83 | py |
| VQ-Diffusion | VQ-Diffusion-main/image_synthesis/taming/modules/losses/lpips.py | """Stripped version of https://github.com/richzhang/PerceptualSimilarity/tree/master/models""" import torch import torch.nn as nn from torchvision import models from collections import namedtuple from image_synthesis.taming.util import get_ckpt_path class LPIPS(nn.Module): # Learned perceptual metric def __... | 4,778 | 38.172131 | 104 | py |
| VQ-Diffusion | VQ-Diffusion-main/image_synthesis/taming/modules/losses/segmentation.py | import torch.nn as nn import torch.nn.functional as F class BCELoss(nn.Module): def forward(self, prediction, target): loss = F.binary_cross_entropy_with_logits(prediction,target) return loss, {} class BCELossWithQuant(nn.Module): def __init__(self, codebook_weight=1.): super().__ini... | 816 | 34.521739 | 82 | py |
| VQ-Diffusion | VQ-Diffusion-main/image_synthesis/taming/modules/losses/vqperceptual.py | import torch import torch.nn as nn import torch.nn.functional as F from image_synthesis.taming.modules.losses.lpips import LPIPS from image_synthesis.taming.modules.discriminator.model import NLayerDiscriminator, weights_init class DummyLoss(nn.Module): def __init__(self): super().__init__() def adopt_... | 6,211 | 44.343066 | 113 | py |
| VQ-Diffusion | VQ-Diffusion-main/image_synthesis/taming/modules/losses/__init__.py | from image_synthesis.taming.modules.losses.vqperceptual import DummyLoss | 74 | 24 | 72 | py |
| VQ-Diffusion | VQ-Diffusion-main/image_synthesis/taming/models/vqgan.py | import torch import torch.nn.functional as F import pytorch_lightning as pl from image_synthesis.utils.misc import instantiate_from_config from image_synthesis.taming.modules.diffusionmodules.model import Encoder, Decoder from image_synthesis.taming.modules.vqvae.quantize import VectorQuantizer2 as VectorQuantizer fr... | 10,554 | 39.28626 | 120 | py |
| VQ-Diffusion | VQ-Diffusion-main/image_synthesis/taming/models/cond_transformer.py | import os, math import torch import torch.nn.functional as F import pytorch_lightning as pl from image_synthesis.utils.misc import instantiate_from_config from image_synthesis.taming.modules.util import SOSProvider def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure trai... | 15,049 | 42.75 | 127 | py |
| VQ-Diffusion | VQ-Diffusion-main/running_command/run_train_ffhq.py | import os string = "python train.py --name ffhq_train --config_file configs/ffhq.yaml --num_node 1 --tensorboard" os.system(string) | 135 | 18.428571 | 103 | py |
| VQ-Diffusion | VQ-Diffusion-main/running_command/run_tune_coco.py | import os string = "python train.py --name coco_tune --config_file configs/coco_tune.yaml --num_node 1 --tensorboard --load_path OUTPUT/pretrained_model/COCO_pretrained.pth" os.system(string) | 195 | 27 | 163 | py |
| VQ-Diffusion | VQ-Diffusion-main/running_command/run_train_imagenet.py | import os string = "python train.py --name imagenet_train --config_file configs/imagenet.yaml --num_node 1 --tensorboard" os.system(string) | 143 | 19.571429 | 111 | py |
| VQ-Diffusion | VQ-Diffusion-main/running_command/run_train_coco.py | import os string = "python train.py --name coco_train --config_file configs/coco.yaml --num_node 1 --tensorboard --load_path OUTPUT/pretrained_model/CC_pretrained.pth" os.system(string) | 189 | 26.142857 | 157 | py |
| VQ-Diffusion | VQ-Diffusion-main/running_command/run_train_cub.py | import os string = "python train.py --name cub200_train --config_file configs/cub200.yaml --num_node 1 --tensorboard --load_path OUTPUT/pretrained_model/CC_pretrained.pth" os.system(string) | 193 | 26.714286 | 161 | py |
| Reflect | Reflect-master/mnist_trainer.py | import os import tensorflow as tf from util import constants from util.config_util import get_model_params, get_task_params, get_train_params from tf2_models.trainer import Trainer from absl import app from absl import flags from util.models import MODELS from util.tasks import TASKS FLAGS = flags.FLAGS flags.DEFINE_... | 2,498 | 31.454545 | 153 | py |
| Reflect | Reflect-master/keras_trainer.py | import os import tensorflow as tf from util import constants from util.config_util import get_model_params, get_task_params, get_train_params from tf2_models.trainer import Trainer from absl import app from absl import flags from util.models import MODELS from util.tasks import TASKS FLAGS = flags.FLAGS flags.DEFINE_... | 2,688 | 34.381579 | 153 | py |
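For reference, a plausible way to reproduce the numeric columns from a file on disk, under the assumption that `file_length` is the raw character count, `avg_line_length` is the mean line length with newlines excluded, and `max_line_length` is the longest line. These definitions are an assumption made for illustration; the dataset's exact computation may differ slightly (for example in how trailing newlines are counted).

```python
from pathlib import Path

def file_stats(path: str) -> dict:
    """Per-file statistics in the shape of the table columns above.

    Assumed definitions (not documented by the dataset): file_length is the
    total character count, avg_line_length the mean length of the lines with
    newlines excluded, and max_line_length the length of the longest line.
    """
    text = Path(path).read_text(encoding="utf-8", errors="replace")
    lines = text.splitlines() or [""]  # guard against empty files
    return {
        "file_length": len(text),
        "avg_line_length": sum(len(line) for line in lines) / len(lines),
        "max_line_length": max(len(line) for line in lines),
        "extension_type": Path(path).suffix.lstrip(".") or None,
    }

# Example (path taken from the table above):
# file_stats("fitclip-main/aligner/transforms.py")
```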