| repo (string, lengths 1–99) | file (string, lengths 13–215) | code (string, lengths 12–59.2M) | file_length (int64, 12–59.2M) | avg_line_length (float64, 3.82–1.48M) | max_line_length (int64, 12–2.51M) | extension_type (string, 1 class) |
|---|---|---|---|---|---|---|
multi-label-ood | multi-label-ood-master/utils/dataloader/nus_wide_loader.py | import os
import collections
import json
import numpy as np
from PIL import Image
import torch
import torchvision
from tqdm import tqdm
from torch.utils import data
import random
class nuswideloader(data.Dataset):
def __init__(self, root='./datasets/nus-wide/', split="train",
in_dis=True, img_... | 2,965 | 28.66 | 72 | py |
multi-label-ood | multi-label-ood-master/model/classifiersimple.py | import re
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
from collections import OrderedDict
# GroupNorm
class clssimp(nn.Module):
def __init__(self, ch=2880, num_classes=20):
super(clssimp, self).__init__()
self.pool = nn.AdaptiveAvgP... | 1,793 | 23.575342 | 66 | py |
EFDM | EFDM-main/ArbitraryStyleTransfer/test.py | import argparse
from pathlib import Path
import torch
import torch.nn as nn
from PIL import Image
from torchvision import transforms
from torchvision.utils import save_image
import time
import net
from function import adaptive_instance_normalization, coral
from function import adaptive_mean_normalization
from function... | 12,131 | 45.129278 | 225 | py |
EFDM | EFDM-main/ArbitraryStyleTransfer/sampler.py | import numpy as np
from torch.utils import data
def InfiniteSampler(n):
# i = 0
i = n - 1
order = np.random.permutation(n)
while True:
yield order[i]
i += 1
if i >= n:
np.random.seed()
order = np.random.permutation(n)
i = 0
class InfiniteSa... | 564 | 19.925926 | 54 | py |
EFDM | EFDM-main/ArbitraryStyleTransfer/test_video.py | import argparse
from pathlib import Path
from tqdm import tqdm
import torch
import torch.nn as nn
import numpy as np
from PIL import Image
import cv2
import imageio
from torchvision import transforms
from torchvision.utils import save_image
import net
from function import adaptive_instance_normalization, coral
impor... | 7,014 | 34.075 | 115 | py |
EFDM | EFDM-main/ArbitraryStyleTransfer/torch_to_pytorch.py | from __future__ import print_function
import argparse
from functools import reduce
import torch
assert torch.__version__.split('.')[0] == '0', 'Only working on PyTorch 0.x.x'
import torch.nn as nn
from torch.autograd import Variable
from torch.utils.serialization import load_lua
class LambdaBase(nn.Sequential):
... | 12,926 | 39.021672 | 88 | py |
EFDM | EFDM-main/ArbitraryStyleTransfer/net.py | import torch.nn as nn
import torch
from function import adaptive_mean_normalization as adamean
from function import adaptive_std_normalization as adastd
from function import adaptive_instance_normalization as adain
from function import exact_feature_distribution_matching as efdm
from function import histogram_matching ... | 7,733 | 37.864322 | 121 | py |
EFDM | EFDM-main/ArbitraryStyleTransfer/view.py | import os
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
from torchvision import transforms
photo_flag = True # True for photo realistic style, false for normal style transfer.
interplate_flag = False # for mixed intermediate domain
# adahist --> efdm
# adavar --> adastd
# adarealhist --> h... | 8,197 | 43.075269 | 139 | py |
EFDM | EFDM-main/ArbitraryStyleTransfer/function.py | import torch
from skimage.exposure import match_histograms
import numpy as np
def calc_mean_std(feat, eps=1e-5):
# eps is a small value added to the variance to avoid divide-by-zero.
size = feat.size()
assert (len(size) == 4)
N, C = size[:2]
feat_var = feat.view(N, C, -1).var(dim=2) + eps
feat_... | 4,616 | 39.858407 | 122 | py |
EFDM | EFDM-main/ArbitraryStyleTransfer/train.py | import argparse
from pathlib import Path
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.utils.data as data
from PIL import Image, ImageFile
from tensorboardX import SummaryWriter
from torchvision import transforms
from tqdm import tqdm
import net
from sampler import InfiniteSampl... | 4,647 | 32.927007 | 97 | py |
EFDM | EFDM-main/DomainGeneralization/reid/main.py | import sys
import time
import os.path as osp
import argparse
import torch
import torch.nn as nn
import torchreid
from torchreid.utils import (
Logger, check_isfile, set_random_seed, collect_env_info,
resume_from_checkpoint, load_pretrained_weights, compute_model_complexity
)
from default_config import (
i... | 9,656 | 38.097166 | 183 | py |
EFDM | EFDM-main/DomainGeneralization/reid/models/osnet_db.py | from __future__ import division, absolute_import
import warnings
import torch
from torch import nn
from torch.nn import functional as F
from .dropblock import DropBlock2D, LinearScheduler
__all__ = [
'osnet_x1_0', 'osnet_x0_75', 'osnet_x0_5', 'osnet_x0_25', 'osnet_ibn_x1_0'
]
pretrained_urls = {
'osnet_x1_0'... | 18,266 | 27.676609 | 108 | py |
EFDM | EFDM-main/DomainGeneralization/reid/models/resnet_db.py | """
Code source: https://github.com/pytorch/vision
"""
from __future__ import division, absolute_import
import torch.utils.model_zoo as model_zoo
from torch import nn
from .dropblock import DropBlock2D, LinearScheduler
__all__ = [
'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152',
'resnext50_32x4d'... | 16,436 | 27.685864 | 106 | py |
EFDM | EFDM-main/DomainGeneralization/reid/models/efdmix.py | import random
from contextlib import contextmanager
import torch
import torch.nn as nn
def deactivate_efdmix(m):
if type(m) == EFDMix:
m.set_activation_status(False)
def activate_efdmix(m):
if type(m) == EFDMix:
m.set_activation_status(True)
def random_efdmix(m):
if type(m) == EFDMix:
... | 3,134 | 25.344538 | 119 | py |
EFDM | EFDM-main/DomainGeneralization/reid/models/osnet_efdmix2.py | from __future__ import division, absolute_import
import warnings
import torch
from torch import nn
from torch.nn import functional as F
from .efdmix import EFDMix
__all__ = [
'osnet_x1_0', 'osnet_x0_75', 'osnet_x0_5', 'osnet_x0_25', 'osnet_ibn_x1_0'
]
pretrained_urls = {
'osnet_x1_0':
'https://drive.goog... | 18,165 | 27.473354 | 108 | py |
EFDM | EFDM-main/DomainGeneralization/reid/models/osnet_ms.py | from __future__ import division, absolute_import
import warnings
import torch
from torch import nn
from torch.nn import functional as F
from .mixstyle import MixStyle
__all__ = [
'osnet_x1_0', 'osnet_x0_75', 'osnet_x0_5', 'osnet_x0_25', 'osnet_ibn_x1_0'
]
pretrained_urls = {
'osnet_x1_0':
'https://drive.... | 21,554 | 27.625498 | 108 | py |
EFDM | EFDM-main/DomainGeneralization/reid/models/resnet_efdmix.py | """
Code source: https://github.com/pytorch/vision
"""
from __future__ import division, absolute_import
import torch.utils.model_zoo as model_zoo
from torch import nn
from .efdmix import EFDMix
__all__ = [
'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152',
'resnext50_32x4d', 'resnext101_32x8d', 're... | 19,768 | 27.444604 | 106 | py |
EFDM | EFDM-main/DomainGeneralization/reid/models/mixhm.py | ## conducting eCDFs matching with HM.
import random
from contextlib import contextmanager
import torch
import torch.nn as nn
from skimage.exposure import match_histograms
import numpy as np
def search_sorted(bin_locations, inputs, eps=-1e-6):
"""
Searches for which bin an input belongs to (in a way that is pa... | 4,795 | 31.849315 | 201 | py |
EFDM | EFDM-main/DomainGeneralization/reid/models/resnet_ms.py | """
Code source: https://github.com/pytorch/vision
"""
from __future__ import division, absolute_import
import torch.utils.model_zoo as model_zoo
from torch import nn
from .mixstyle import MixStyle
__all__ = [
'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152',
'resnext50_32x4d', 'resnext101_32x8d',... | 19,818 | 27.516547 | 106 | py |
EFDM | EFDM-main/DomainGeneralization/reid/models/mixstyle.py | import random
from contextlib import contextmanager
import torch
import torch.nn as nn
def deactivate_mixstyle(m):
if type(m) == MixStyle:
m.set_activation_status(False)
def activate_mixstyle(m):
if type(m) == MixStyle:
m.set_activation_status(True)
def random_mixstyle(m):
if type(m) =... | 3,127 | 24.430894 | 90 | py |
EFDM | EFDM-main/DomainGeneralization/reid/models/osnet_ms2.py | from __future__ import division, absolute_import
import warnings
import torch
from torch import nn
from torch.nn import functional as F
from .mixstyle import MixStyle
__all__ = [
'osnet_x1_0', 'osnet_x0_75', 'osnet_x0_5', 'osnet_x0_25', 'osnet_ibn_x1_0'
]
pretrained_urls = {
'osnet_x1_0':
'https://drive.... | 18,212 | 27.591837 | 108 | py |
EFDM | EFDM-main/DomainGeneralization/reid/models/resnet_ms2.py | """
Code source: https://github.com/pytorch/vision
"""
from __future__ import division, absolute_import
import torch.utils.model_zoo as model_zoo
from torch import nn
from .mixstyle import MixStyle
__all__ = [
'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152',
'resnext50_32x4d', 'resnext101_32x8d',... | 16,377 | 27.885362 | 106 | py |
EFDM | EFDM-main/DomainGeneralization/reid/models/resnet_efdmix2.py | """
Code source: https://github.com/pytorch/vision
"""
from __future__ import division, absolute_import
import torch.utils.model_zoo as model_zoo
from torch import nn
from .efdmix import EFDMix
__all__ = [
'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152',
'resnext50_32x4d', 'resnext101_32x8d', 're... | 16,332 | 27.654386 | 106 | py |
EFDM | EFDM-main/DomainGeneralization/reid/models/osnet_efdmix.py | from __future__ import division, absolute_import
import warnings
import torch
from torch import nn
from torch.nn import functional as F
from .efdmix import EFDMix
__all__ = [
'osnet_x1_0', 'osnet_x0_75', 'osnet_x0_5', 'osnet_x0_25', 'osnet_ibn_x1_0'
]
pretrained_urls = {
'osnet_x1_0':
'https://drive.goog... | 21,505 | 27.598404 | 108 | py |
EFDM | EFDM-main/DomainGeneralization/reid/models/dropblock/dropblock.py | import torch
import torch.nn.functional as F
from torch import nn
class DropBlock2D(nn.Module):
r"""Randomly zeroes 2D spatial blocks of the input tensor.
As described in the paper
`DropBlock: A regularization method for convolutional networks`_ ,
dropping whole blocks of feature map allows to remove... | 4,440 | 29.210884 | 98 | py |
EFDM | EFDM-main/DomainGeneralization/reid/models/dropblock/scheduler.py | import numpy as np
from torch import nn
class LinearScheduler(nn.Module):
def __init__(self, dropblock, start_value, stop_value, nr_steps):
super(LinearScheduler, self).__init__()
self.dropblock = dropblock
self.i = 0
self.drop_values = np.linspace(start=start_value, stop=stop_valu... | 546 | 26.35 | 88 | py |
EFDM | EFDM-main/DomainGeneralization/imcls/vis.py | import argparse
import torch
import os.path as osp
import numpy as np
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from matplotlib import pyplot as plt
def normalize(feature):
norm = np.sqrt((feature**2).sum(1, keepdims=True))
return feature / (norm + 1e-12)
def main():
parser... | 4,207 | 28.222222 | 208 | py |
EFDM | EFDM-main/DomainGeneralization/imcls/train.py | import argparse
import copy
import torch
from dassl.utils import setup_logger, set_random_seed, collect_env_info
from dassl.config import get_cfg_default
from dassl.engine import build_trainer
import time
# custom
from yacs.config import CfgNode as CN
import datasets.ssdg_pacs
import datasets.ssdg_officehome
import d... | 7,586 | 27.738636 | 136 | py |
EFDM | EFDM-main/DomainGeneralization/imcls/trainers/semimixstyle.py | import torch
from torch.nn import functional as F
from dassl.data import DataManager
from dassl.engine import TRAINER_REGISTRY, TrainerXU
from dassl.metrics import compute_accuracy
from dassl.data.transforms import build_transform
from dassl.modeling.ops import deactivate_mixstyle, run_with_mixstyle
@TRAINER_REGISTR... | 4,771 | 35.427481 | 79 | py |
EFDM | EFDM-main/DomainGeneralization/imcls/trainers/vanilla2.py | from torch.nn import functional as F
import torch
from dassl.engine import TRAINER_REGISTRY, TrainerX
from dassl.metrics import compute_accuracy
from dassl.modeling.ops import random_efdmix, crossdomain_efdmix
import numpy as np
import os.path as osp
# import ipdb
@TRAINER_REGISTRY.register()
class Vanilla2(TrainerX):... | 7,343 | 31.352423 | 77 | py |
graphCRNs | graphCRNs-master/gcrn/graphNet/torch_gcn.py | # Implementation of GCN Regressor in PyTorch
import numpy as np
import torch
import torchvision
import torchvision.transforms as transforms
from torch_geometric.nn import GCNConv
class GCNReg(torch.nn.Module):
def __init__(self, hidden_channels, num_features) -> None:
super(GCNReg, self).__init__()
torch.ma... | 2,063 | 32.290323 | 80 | py |
graphCRNs | graphCRNs-master/gcrn/graphNet/graph_cnn.py | # coding=utf-8
import time
from tqdm import tqdm
import tf_geometric as tfg
import tensorflow as tf
from tf_geometric.utils import tf_utils
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
graph, (train_index, valid_index,
test_index) = tfg.datasets.CoraDataset().load_data()
num_classes = graph.y.max() + 1
... | 4,014 | 30.124031 | 125 | py |
graphCRNs | graphCRNs-master/build/lib/gcrn/graphNet/torch_gcn.py | # Implementation of GCN Regressor in PyTorch
import numpy as np
import torch
import torchvision
import torchvision.transforms as transforms
from torch_geometric.nn import GCNConv
class GCNReg(torch.nn.Module):
def __init__(self, hidden_channels, num_features) -> None:
super(GCNReg, self).__init__()
torch.ma... | 2,063 | 32.290323 | 80 | py |
graphCRNs | graphCRNs-master/build/lib/gcrn/graphNet/graph_cnn.py | # coding=utf-8
import time
from tqdm import tqdm
import tf_geometric as tfg
import tensorflow as tf
from tf_geometric.utils import tf_utils
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
graph, (train_index, valid_index,
test_index) = tfg.datasets.CoraDataset().load_data()
num_classes = graph.y.max() + 1
... | 4,014 | 30.124031 | 125 | py |
EnsembleBench | EnsembleBench-main/EnsembleBench/frameworks/pytorchUtility.py | import os
import time
import torch
import torch.nn as nn
import numpy as np
from collections import Counter
def calAccuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, Tru... | 5,359 | 33.358974 | 101 | py |
SpykeTorch | SpykeTorch-master/KheradpishehDeep.py | ###################################################################################
# Reimplementation of the Digit Recognition Experiment (MNIST) Performed in: #
# https://www.sciencedirect.com/science/article/pii/S0893608017302903 #
# ... | 8,026 | 36.162037 | 126 | py |
SpykeTorch | SpykeTorch-master/MozafariShallow.py | ##########################################################################
# Reimplementation of the Object Recognition Experiments Performed in: #
# https://ieeexplore.ieee.org/document/8356226/ #
# #
# Reference: ... | 11,177 | 40.247232 | 140 | py |
SpykeTorch | SpykeTorch-master/MozafariDeep.py | #################################################################################
# Reimplementation of the 10-Class Digit Recognition Experiment Performed in: #
# https://arxiv.org/abs/1804.00227 #
# ... | 12,162 | 38.618893 | 126 | py |
SpykeTorch | SpykeTorch-master/SpykeTorch/visualization.py | import torch
from PIL import Image
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# Show 2D the tensor.
def show_tensor(aTensor, _vmin = None, _vmax = None):
r"""Plots a 2D tensor in gray color map and shows it in a window.
Args:
aTensor (Tensor): The input tensor.
_vmin (... | 3,611 | 43.04878 | 138 | py |
SpykeTorch | SpykeTorch-master/SpykeTorch/functional.py | import torch
import torch.nn as nn
import torch.nn.functional as fn
import numpy as np
from .utils import to_pair
# padding
# pad = (padLeft, padRight, padTop, padBottom)
def pad(input, pad, value=0):
r"""Applies 2D padding on the input tensor.
Args:
input (Tensor): The input tensor.
pad (tupl... | 11,763 | 41.778182 | 140 | py |
SpykeTorch | SpykeTorch-master/SpykeTorch/utils.py | import torch
import torch.nn.functional as fn
import numpy as np
import math
from torchvision import transforms
from torchvision import datasets
import os
def to_pair(data):
r"""Converts a single or a tuple of data into a pair. If the data is a tuple with more than two elements, it selects
the first two of the... | 15,909 | 40.005155 | 155 | py |
SpykeTorch | SpykeTorch-master/SpykeTorch/snn.py | import torch
import torch.nn as nn
import torch.nn.functional as fn
from . import functional as sf
from torch.nn.parameter import Parameter
from .utils import to_pair
class Convolution(nn.Module):
r"""Performs a 2D convolution over an input spike-wave composed of several input
planes. Current version only supp... | 11,690 | 46.913934 | 169 | py |
OpenPCDet | OpenPCDet-master/setup.py | import os
import subprocess
from setuptools import find_packages, setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
def get_git_commit_number():
if not os.path.exists('.git'):
return '0000000'
cmd_out = subprocess.run(['git', 'rev-parse', 'HEAD'], stdout=subprocess.PIPE)
... | 4,478 | 31.456522 | 95 | py |
OpenPCDet | OpenPCDet-master/tools/test.py | import _init_path
import argparse
import datetime
import glob
import os
import re
import time
from pathlib import Path
import numpy as np
import torch
from tensorboardX import SummaryWriter
from eval_utils import eval_utils
from pcdet.config import cfg, cfg_from_list, cfg_from_yaml_file, log_config_to_file
from pcdet... | 8,576 | 40.235577 | 120 | py |
OpenPCDet | OpenPCDet-master/tools/demo.py | import argparse
import glob
from pathlib import Path
try:
import open3d
from visual_utils import open3d_vis_utils as V
OPEN3D_FLAG = True
except:
import mayavi.mlab as mlab
from visual_utils import visualize_utils as V
OPEN3D_FLAG = False
import numpy as np
import torch
from pcdet.config impo... | 3,750 | 32.19469 | 118 | py |
OpenPCDet | OpenPCDet-master/tools/train.py | import _init_path
import argparse
import datetime
import glob
import os
from pathlib import Path
from test import repeat_eval_ckpt
import torch
import torch.nn as nn
from tensorboardX import SummaryWriter
from pcdet.config import cfg, cfg_from_list, cfg_from_yaml_file, log_config_to_file
from pcdet.datasets import bu... | 10,316 | 43.662338 | 175 | py |
OpenPCDet | OpenPCDet-master/tools/eval_utils/eval_utils.py | import pickle
import time
import numpy as np
import torch
import tqdm
from pcdet.models import load_data_to_gpu
from pcdet.utils import common_utils
def statistics_info(cfg, ret_dict, metric, disp_dict):
for cur_thresh in cfg.MODEL.POST_PROCESSING.RECALL_THRESH_LIST:
metric['recall_roi_%s' % str(cur_thr... | 5,219 | 36.021277 | 131 | py |
OpenPCDet | OpenPCDet-master/tools/train_utils/train_utils.py | import os
import torch
import tqdm
import time
import glob
from torch.nn.utils import clip_grad_norm_
from pcdet.utils import common_utils, commu_utils
def train_one_epoch(model, optimizer, train_loader, model_func, lr_scheduler, accumulated_iter, optim_cfg,
rank, tbar, total_it_each_epoch, datal... | 12,462 | 44.819853 | 150 | py |
OpenPCDet | OpenPCDet-master/tools/train_utils/optimization/fastai_optim.py | # This file is modified from https://github.com/traveller59/second.pytorch
try:
from collections.abc import Iterable
except:
from collections import Iterable
import torch
from torch import nn
from torch._utils import _unflatten_dense_tensors
from torch.nn.utils import parameters_to_vector
bn_types = (nn.Batc... | 10,535 | 38.758491 | 117 | py |
OpenPCDet | OpenPCDet-master/tools/train_utils/optimization/learning_schedules_fastai.py | # This file is modified from https://github.com/traveller59/second.pytorch
import math
from functools import partial
import numpy as np
import torch.optim.lr_scheduler as lr_sched
from .fastai_optim import OptimWrapper
class LRSchedulerStep(object):
def __init__(self, fai_optimizer: OptimWrapper, total_step, l... | 6,102 | 36.441718 | 118 | py |
OpenPCDet | OpenPCDet-master/tools/train_utils/optimization/__init__.py | from functools import partial
import torch.nn as nn
import torch.optim as optim
import torch.optim.lr_scheduler as lr_sched
from .fastai_optim import OptimWrapper
from .learning_schedules_fastai import CosineWarmupLR, OneCycle, CosineAnnealing
def build_optimizer(model, optim_cfg):
if optim_cfg.OPTIMIZER == 'ad... | 2,746 | 38.811594 | 128 | py |
OpenPCDet | OpenPCDet-master/tools/visual_utils/open3d_vis_utils.py | """
Open3d visualization tool box
Written by Jihan YANG
All rights preserved from 2021 - present.
"""
import open3d
import torch
import matplotlib
import numpy as np
box_colormap = [
[1, 1, 1],
[0, 1, 0],
[0, 1, 1],
[1, 1, 0],
]
def get_coor_colors(obj_labels):
"""
Args:
obj_labels: 1... | 3,413 | 28.179487 | 126 | py |
OpenPCDet | OpenPCDet-master/tools/visual_utils/visualize_utils.py | import mayavi.mlab as mlab
import numpy as np
import torch
box_colormap = [
[1, 1, 1],
[0, 1, 0],
[0, 1, 1],
[1, 1, 0],
]
def check_numpy_to_torch(x):
if isinstance(x, np.ndarray):
return torch.from_numpy(x).float(), True
return x, False
def rotate_points_along_z(points, angle):
... | 8,540 | 38.541667 | 121 | py |
OpenPCDet | OpenPCDet-master/pcdet/models/__init__.py | from collections import namedtuple
import numpy as np
import torch
from .detectors import build_detector
try:
import kornia
except:
pass
# print('Warning: kornia is not installed. This package is only required by CaDDN')
def build_network(model_cfg, num_class, dataset):
model = build_detector(
... | 1,553 | 27.254545 | 101 | py |
OpenPCDet | OpenPCDet-master/pcdet/models/view_transforms/depth_lss.py | import torch
from torch import nn
from pcdet.ops.bev_pool import bev_pool
def gen_dx_bx(xbound, ybound, zbound):
dx = torch.Tensor([row[2] for row in [xbound, ybound, zbound]])
bx = torch.Tensor([row[0] + row[2] / 2.0 for row in [xbound, ybound, zbound]])
nx = torch.LongTensor(
[(row[1] - row[0]) ... | 9,959 | 37.604651 | 118 | py |
OpenPCDet | OpenPCDet-master/pcdet/models/detectors/mppnet_e2e.py | import torch
import os
import numpy as np
import copy
from ...utils import common_utils
from ..model_utils import model_nms_utils
from .detector3d_template import Detector3DTemplate
from pcdet.ops.iou3d_nms import iou3d_nms_utils
from pcdet.datasets.augmentor import augmentor_utils, database_sampler
class MPPNetE2E(D... | 9,379 | 41.06278 | 107 | py |
OpenPCDet | OpenPCDet-master/pcdet/models/detectors/mppnet.py | import torch
from .detector3d_template import Detector3DTemplate
from pcdet.ops.iou3d_nms import iou3d_nms_utils
import os
import numpy as np
import time
from ...utils import common_utils
from ..model_utils import model_nms_utils
from pcdet.datasets.augmentor import augmentor_utils, database_sampler
class MPPNet(Dete... | 7,844 | 42.104396 | 107 | py |
OpenPCDet | OpenPCDet-master/pcdet/models/detectors/detector3d_template.py | import os
import torch
import torch.nn as nn
import numpy as np
from ...ops.iou3d_nms import iou3d_nms_utils
from ...utils.spconv_utils import find_all_spconv_keys
from .. import backbones_2d, backbones_3d, dense_heads, roi_heads
from ..backbones_2d import map_to_bev
from ..backbones_3d import pfe, vfe
from ..model_ut... | 19,433 | 45.716346 | 150 | py |
OpenPCDet | OpenPCDet-master/pcdet/models/detectors/second_net_iou.py | import torch
from .detector3d_template import Detector3DTemplate
from ..model_utils.model_nms_utils import class_agnostic_nms
from ...ops.roiaware_pool3d import roiaware_pool3d_utils
class SECONDNetIoU(Detector3DTemplate):
def __init__(self, model_cfg, num_class, dataset):
super().__init__(model_cfg=model... | 7,499 | 41.134831 | 127 | py |
OpenPCDet | OpenPCDet-master/pcdet/models/backbones_3d/spconv_unet.py | from functools import partial
import torch
import torch.nn as nn
from ...utils.spconv_utils import replace_feature, spconv
from ...utils import common_utils
from .spconv_backbone import post_act_block
class SparseBasicBlock(spconv.SparseModule):
expansion = 1
def __init__(self, inplanes, planes, stride=1, ... | 8,602 | 39.389671 | 117 | py |
OpenPCDet | OpenPCDet-master/pcdet/models/backbones_3d/spconv_backbone_voxelnext2d.py | from functools import partial
import torch
import torch.nn as nn
from ...utils.spconv_utils import replace_feature, spconv
def post_act_block(in_channels, out_channels, kernel_size, indice_key=None, stride=1, padding=0,
conv_type='subm', norm_fn=None):
if conv_type == 'subm':
conv = s... | 8,644 | 38.295455 | 161 | py |
OpenPCDet | OpenPCDet-master/pcdet/models/backbones_3d/spconv_backbone.py | from functools import partial
import torch.nn as nn
from ...utils.spconv_utils import replace_feature, spconv
def post_act_block(in_channels, out_channels, kernel_size, indice_key=None, stride=1, padding=0,
conv_type='subm', norm_fn=None):
if conv_type == 'subm':
conv = spconv.SubMCo... | 10,458 | 34.334459 | 118 | py |
OpenPCDet | OpenPCDet-master/pcdet/models/backbones_3d/dsvt.py | import torch
import torch.nn as nn
from torch.utils.checkpoint import checkpoint
from math import ceil
from pcdet.models.model_utils.dsvt_utils import get_window_coors, get_inner_win_inds_cuda, get_pooling_index, get_continous_inds
from pcdet.models.model_utils.dsvt_utils import PositionEmbeddingLearned
class DSVT(n... | 32,046 | 50.940032 | 184 | py |
OpenPCDet | OpenPCDet-master/pcdet/models/backbones_3d/spconv_backbone_voxelnext.py | from functools import partial
import torch
import torch.nn as nn
from ...utils.spconv_utils import replace_feature, spconv
def post_act_block(in_channels, out_channels, kernel_size, indice_key=None, stride=1, padding=0,
conv_type='subm', norm_fn=None):
if conv_type == 'subm':
conv = s... | 8,945 | 38.584071 | 177 | py |
OpenPCDet | OpenPCDet-master/pcdet/models/backbones_3d/spconv_backbone_2d.py | from functools import partial
import torch.nn as nn
from ...utils.spconv_utils import replace_feature, spconv
def post_act_block(in_channels, out_channels, kernel_size, indice_key=None, stride=1, padding=0,
conv_type='subm', norm_fn=None):
if conv_type == 'subm':
conv = spconv.SubMCo... | 10,492 | 33.860465 | 116 | py |
OpenPCDet | OpenPCDet-master/pcdet/models/backbones_3d/spconv_backbone_focal.py | from functools import partial
import torch
from pcdet.utils.spconv_utils import spconv
import torch.nn as nn
from .focal_sparse_conv.focal_sparse_conv import FocalSparseConv
from .focal_sparse_conv.SemanticSeg.pyramid_ffn import PyramidFeat2D
class objDict:
@staticmethod
def to_object(obj: object, **data):
... | 10,218 | 36.848148 | 130 | py |
OpenPCDet | OpenPCDet-master/pcdet/models/backbones_3d/pointnet2_backbone.py | import torch
import torch.nn as nn
from ...ops.pointnet2.pointnet2_batch import pointnet2_modules
from ...ops.pointnet2.pointnet2_stack import pointnet2_modules as pointnet2_modules_stack
from ...ops.pointnet2.pointnet2_stack import pointnet2_utils as pointnet2_utils_stack
class PointNet2MSG(nn.Module):
def __in... | 8,540 | 40.26087 | 132 | py |
OpenPCDet | OpenPCDet-master/pcdet/models/backbones_3d/pfe/voxel_set_abstraction.py | import math
import numpy as np
import torch
import torch.nn as nn
from ....ops.pointnet2.pointnet2_stack import pointnet2_modules as pointnet2_stack_modules
from ....ops.pointnet2.pointnet2_stack import pointnet2_utils as pointnet2_stack_utils
from ....utils import common_utils
def bilinear_interpolate_torch(im, x, ... | 16,404 | 38.817961 | 127 | py |
OpenPCDet | OpenPCDet-master/pcdet/models/backbones_3d/focal_sparse_conv/focal_sparse_conv.py | import torch
import torch.nn as nn
from pcdet.utils.spconv_utils import spconv
from pcdet.ops.roiaware_pool3d.roiaware_pool3d_utils import points_in_boxes_gpu
from pcdet.models.backbones_3d.focal_sparse_conv.focal_sparse_utils import split_voxels, check_repeat, FocalLoss
from pcdet.utils import common_utils
class Foc... | 10,810 | 47.048889 | 213 | py |
OpenPCDet | OpenPCDet-master/pcdet/models/backbones_3d/focal_sparse_conv/focal_sparse_utils.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class FocalLoss(nn.Module):
def __init__(self, gamma=2.0, eps=1e-7):
super(FocalLoss, self).__init__()
self.gamma = gamma
self.eps = eps
def one_hot(self, index, classes):
s... | 6,450 | 42.587838 | 165 | py |
OpenPCDet | OpenPCDet-master/pcdet/models/backbones_3d/focal_sparse_conv/SemanticSeg/basic_blocks.py | import torch.nn as nn
class BasicBlock1D(nn.Module):
def __init__(self, in_channels, out_channels, **kwargs):
"""
Initializes convolutional block
Args:
in_channels: int, Number of input channels
out_channels: int, Number of output channels
**kwargs: Dict... | 2,052 | 30.106061 | 60 | py |
OpenPCDet | OpenPCDet-master/pcdet/models/backbones_3d/focal_sparse_conv/SemanticSeg/sem_deeplabv3.py | from collections import OrderedDict
from pathlib import Path
from torch import hub
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
class SegTemplate(nn.Module):
def __init__(self, constructor, feat_extract_layer, num_classes, pretrained_path=None, aux_loss=None):
"""... | 6,184 | 37.416149 | 136 | py |
OpenPCDet | OpenPCDet-master/pcdet/models/backbones_3d/focal_sparse_conv/SemanticSeg/pyramid_ffn.py | import torch
import torch.nn as nn
from .basic_blocks import BasicBlock2D
from .sem_deeplabv3 import SemDeepLabV3
class PyramidFeat2D(nn.Module):
def __init__(self, optimize, model_cfg):
"""
Initialize 2D feature network via pretrained model
Args:
model_cfg: EasyDict, Dense cla... | 2,744 | 34.192308 | 88 | py |
OpenPCDet | OpenPCDet-master/pcdet/models/backbones_3d/vfe/vfe_template.py | import torch.nn as nn
class VFETemplate(nn.Module):
def __init__(self, model_cfg, **kwargs):
super().__init__()
self.model_cfg = model_cfg
def get_output_feature_dim(self):
raise NotImplementedError
def forward(self, **kwargs):
"""
Args:
**kwargs:
... | 470 | 19.478261 | 45 | py |
OpenPCDet | OpenPCDet-master/pcdet/models/backbones_3d/vfe/dynamic_mean_vfe.py | import torch
from .vfe_template import VFETemplate
try:
import torch_scatter
except Exception as e:
# Incase someone doesn't want to use dynamic pillar vfe and hasn't installed torch_scatter
pass
from .vfe_template import VFETemplate
class DynamicMeanVFE(VFETemplate):
def __init__(self, model_cfg, ... | 2,980 | 37.714286 | 106 | py |
OpenPCDet | OpenPCDet-master/pcdet/models/backbones_3d/vfe/mean_vfe.py | import torch
from .vfe_template import VFETemplate
class MeanVFE(VFETemplate):
def __init__(self, model_cfg, num_point_features, **kwargs):
super().__init__(model_cfg=model_cfg)
self.num_point_features = num_point_features
def get_output_feature_dim(self):
return self.num_point_featu... | 1,038 | 31.46875 | 99 | py |
OpenPCDet | OpenPCDet-master/pcdet/models/backbones_3d/vfe/dynamic_pillar_vfe.py | import torch
import torch.nn as nn
import torch.nn.functional as F
try:
import torch_scatter
except Exception as e:
# Incase someone doesn't want to use dynamic pillar vfe and hasn't installed torch_scatter
pass
from .vfe_template import VFETemplate
class PFNLayerV2(nn.Module):
def __init__(self,
... | 9,766 | 39.526971 | 118 | py |
OpenPCDet | OpenPCDet-master/pcdet/models/backbones_3d/vfe/pillar_vfe.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from .vfe_template import VFETemplate
class PFNLayer(nn.Module):
def __init__(self,
in_channels,
out_channels,
use_norm=True,
last_layer=False):
super().__init__()
... | 5,099 | 40.129032 | 137 | py |
OpenPCDet | OpenPCDet-master/pcdet/models/backbones_3d/vfe/image_vfe.py | import torch
from .vfe_template import VFETemplate
from .image_vfe_modules import ffn, f2v
class ImageVFE(VFETemplate):
def __init__(self, model_cfg, grid_size, point_cloud_range, depth_downsample_factor, **kwargs):
super().__init__(model_cfg=model_cfg)
self.grid_size = grid_size
self.pc_... | 2,526 | 28.383721 | 99 | py |
OpenPCDet | OpenPCDet-master/pcdet/models/backbones_3d/vfe/dynamic_voxel_vfe.py | import torch
import torch.nn as nn
import torch.nn.functional as F
try:
import torch_scatter
except Exception as e:
# Incase someone doesn't want to use dynamic pillar vfe and hasn't installed torch_scatter
pass
from .vfe_template import VFETemplate
from .dynamic_pillar_vfe import PFNLayerV2
class Dynam... | 4,491 | 40.981308 | 124 | py |
OpenPCDet | OpenPCDet-master/pcdet/models/backbones_3d/vfe/image_vfe_modules/ffn/depth_ffn.py | import torch.nn as nn
import torch.nn.functional as F
from . import ddn, ddn_loss
from pcdet.models.model_utils.basic_block_2d import BasicBlock2D
class DepthFFN(nn.Module):
def __init__(self, model_cfg, downsample_factor):
"""
Initialize frustum feature network via depth distribution estimation... | 3,778 | 35.336538 | 96 | py |
OpenPCDet | OpenPCDet-master/pcdet/models/backbones_3d/vfe/image_vfe_modules/ffn/ddn/ddn_deeplabv3.py | from .ddn_template import DDNTemplate
try:
import torchvision
except:
pass
class DDNDeepLabV3(DDNTemplate):
def __init__(self, backbone_name, **kwargs):
"""
Initializes DDNDeepLabV3 model
Args:
backbone_name: string, ResNet Backbone Name [ResNet50/ResNet101]
"... | 674 | 26 | 77 | py |
OpenPCDet | OpenPCDet-master/pcdet/models/backbones_3d/vfe/image_vfe_modules/ffn/ddn/ddn_template.py | from collections import OrderedDict
from pathlib import Path
from torch import hub
import torch
import torch.nn as nn
import torch.nn.functional as F
try:
from kornia.enhance.normalize import normalize
except:
pass
# print('Warning: kornia is not installed. This package is only required by CaDDN')
c... | 5,935 | 35.417178 | 106 | py |
OpenPCDet | OpenPCDet-master/pcdet/models/backbones_3d/vfe/image_vfe_modules/ffn/ddn_loss/balancer.py | import torch
import torch.nn as nn
from pcdet.utils import loss_utils
class Balancer(nn.Module):
def __init__(self, fg_weight, bg_weight, downsample_factor=1):
"""
Initialize fixed foreground/background loss balancer
Args:
fg_weight: float, Foreground loss weight
b... | 1,806 | 34.431373 | 102 | py |
OpenPCDet | OpenPCDet-master/pcdet/models/backbones_3d/vfe/image_vfe_modules/ffn/ddn_loss/ddn_loss.py | import torch
import torch.nn as nn
from .balancer import Balancer
from pcdet.utils import transform_utils
try:
from kornia.losses.focal import FocalLoss
except:
pass
# print('Warning: kornia is not installed. This package is only required by CaDDN')
class DDNLoss(nn.Module):
def __init__(self... | 2,428 | 30.960526 | 97 | py |
OpenPCDet | OpenPCDet-master/pcdet/models/backbones_3d/vfe/image_vfe_modules/f2v/frustum_to_voxel.py | import torch
import torch.nn as nn
from .frustum_grid_generator import FrustumGridGenerator
from .sampler import Sampler
class FrustumToVoxel(nn.Module):
def __init__(self, model_cfg, grid_size, pc_range, disc_cfg):
"""
Initializes module to transform frustum features to voxel features via 3D tr... | 2,338 | 41.527273 | 109 | py |
OpenPCDet | OpenPCDet-master/pcdet/models/backbones_3d/vfe/image_vfe_modules/f2v/frustum_grid_generator.py | import torch
import torch.nn as nn
try:
from kornia.utils.grid import create_meshgrid3d
from kornia.geometry.linalg import transform_points
except Exception as e:
# Note: Kornia team will fix this import issue to try to allow the usage of lower torch versions.
# print('Warning: kornia is not installed ... | 6,249 | 41.808219 | 201 | py |
OpenPCDet | OpenPCDet-master/pcdet/models/backbones_3d/vfe/image_vfe_modules/f2v/sampler.py | from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
class Sampler(nn.Module):
def __init__(self, mode="bilinear", padding_mode="zeros"):
"""
Initializes module
Args:
mode: string, Sampling mode [bilinear/nearest]
paddin... | 1,187 | 30.263158 | 114 | py |
OpenPCDet | OpenPCDet-master/pcdet/models/backbones_image/swin.py | # Copyright (c) OpenMMLab. All rights reserved.
"""
Mostly copy-paste from
https://github.com/open-mmlab/mmdetection/blob/main/mmdet/models/backbones/swin.py
"""
import warnings
from collections import OrderedDict
from copy import deepcopy
import torch
import torch.nn as nn
import torch.nn.functional as F
... | 29,291 | 38.744912 | 113 | py |
OpenPCDet | OpenPCDet-master/pcdet/models/backbones_image/img_neck/generalized_lss.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from ...model_utils.basic_block_2d import BasicBlock2D
class GeneralizedLSSFPN(nn.Module):
"""
This module implements FPN, which creates pyramid features built on top of some input feature maps.
This code is adapted from https://gi... | 2,907 | 36.766234 | 139 | py |
OpenPCDet | OpenPCDet-master/pcdet/models/dense_heads/voxelnext_head.py | import numpy as np
import torch
import torch.nn as nn
from torch.nn.init import kaiming_normal_
from ..model_utils import centernet_utils
from ..model_utils import model_nms_utils
from ...utils import loss_utils
from ...utils.spconv_utils import replace_feature, spconv
import copy
from easydict import EasyDict
class ... | 26,566 | 46.441071 | 200 | py |
OpenPCDet | OpenPCDet-master/pcdet/models/dense_heads/anchor_head_single.py | import numpy as np
import torch.nn as nn
from .anchor_head_template import AnchorHeadTemplate
class AnchorHeadSingle(AnchorHeadTemplate):
def __init__(self, model_cfg, input_channels, num_class, class_names, grid_size, point_cloud_range,
predict_boxes_when_training=True, **kwargs):
super... | 2,928 | 37.539474 | 136 | py |
OpenPCDet | OpenPCDet-master/pcdet/models/dense_heads/point_head_template.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from ...ops.roiaware_pool3d import roiaware_pool3d_utils
from ...utils import common_utils, loss_utils
class PointHeadTemplate(nn.Module):
def __init__(self, model_cfg, num_class):
super().__init__()
self.model_cfg = model_cfg
... | 9,776 | 45.336493 | 119 | py |
OpenPCDet | OpenPCDet-master/pcdet/models/dense_heads/anchor_head_template.py | import numpy as np
import torch
import torch.nn as nn
from ...utils import box_coder_utils, common_utils, loss_utils
from .target_assigner.anchor_generator import AnchorGenerator
from .target_assigner.atss_target_assigner import ATSSTargetAssigner
from .target_assigner.axis_aligned_target_assigner import AxisAlignedTa... | 12,364 | 43.800725 | 118 | py |
OpenPCDet | OpenPCDet-master/pcdet/models/dense_heads/anchor_head_multi.py | import numpy as np
import torch
import torch.nn as nn
from ..backbones_2d import BaseBEVBackbone
from .anchor_head_template import AnchorHeadTemplate
class SingleHead(BaseBEVBackbone):
def __init__(self, model_cfg, input_channels, num_class, num_anchors_per_location, code_size, rpn_head_cfg=None,
... | 17,041 | 44.566845 | 117 | py |
OpenPCDet | OpenPCDet-master/pcdet/models/dense_heads/center_head.py | import copy
import numpy as np
import torch
import torch.nn as nn
from torch.nn.init import kaiming_normal_
from ..model_utils import model_nms_utils
from ..model_utils import centernet_utils
from ...utils import loss_utils
from functools import partial
class SeparateHead(nn.Module):
def __init__(self, input_chan... | 19,859 | 46.625899 | 194 | py |
OpenPCDet | OpenPCDet-master/pcdet/models/dense_heads/point_head_box.py | import torch
from ...utils import box_coder_utils, box_utils
from .point_head_template import PointHeadTemplate
class PointHeadBox(PointHeadTemplate):
"""
A simple point-based segmentation head, which are used for PointRCNN.
Reference Paper: https://arxiv.org/abs/1812.04244
PointRCNN: 3D Object Propo... | 4,930 | 41.508621 | 106 | py |
OpenPCDet | OpenPCDet-master/pcdet/models/dense_heads/point_head_simple.py | import torch
from ...utils import box_utils
from .point_head_template import PointHeadTemplate
class PointHeadSimple(PointHeadTemplate):
"""
A simple point-based segmentation head, which are used for PV-RCNN keypoint segmentaion.
Reference Paper: https://arxiv.org/abs/1912.13192
PV-RCNN: Point-Voxel ... | 3,568 | 37.793478 | 106 | py |
OpenPCDet | OpenPCDet-master/pcdet/models/dense_heads/transfusion_head.py | import copy
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn.init import kaiming_normal_
from ..model_utils.transfusion_utils import clip_sigmoid
from ..model_utils.basic_block_2d import BasicBlock2D
from ..model_utils.transfusion_utils import PositionEmbeddingLearned, ... | 21,250 | 43.272917 | 146 | py |
OpenPCDet | OpenPCDet-master/pcdet/models/dense_heads/point_intra_part_head.py | import torch
from ...utils import box_coder_utils, box_utils
from .point_head_template import PointHeadTemplate
class PointIntraPartOffsetHead(PointHeadTemplate):
"""
Point-based head for predicting the intra-object part locations.
Reference Paper: https://arxiv.org/abs/1907.03670
From Points to Part... | 5,568 | 42.507813 | 107 | py |