| repo (string, length 1–99) | file (string, length 13–215) | code (string, length 12–59.2M) | file_length (int64, 12–59.2M) | avg_line_length (float64, 3.82–1.48M) | max_line_length (int64, 12–2.51M) | extension_type (string, 1 class) |
|---|---|---|---|---|---|---|
RVT | RVT-master/callbacks/gradflow.py | from typing import Any
import pytorch_lightning as pl
from pytorch_lightning.callbacks import Callback
from pytorch_lightning.utilities.rank_zero import rank_zero_only
from callbacks.utils.visualization import get_grad_flow_figure
class GradFlowLogCallback(Callback):
def __init__(self, log_every_n_train_steps: ... | 1,133 | 41 | 110 | py |
RVT | RVT-master/callbacks/viz_base.py | import random
from enum import Enum
from typing import Any, List, Optional, Type, Union
import numpy as np
import pytorch_lightning as pl
import torch as th
from einops import rearrange, reduce
from omegaconf import DictConfig
from pytorch_lightning.callbacks import Callback
from pytorch_lightning.utilities.rank_zero ... | 6,391 | 35.525714 | 120 | py |
RVT | RVT-master/callbacks/detection.py | from enum import Enum, auto
from typing import Any
import torch
from einops import rearrange
from omegaconf import DictConfig
from data.utils.types import ObjDetOutput
from loggers.wandb_logger import WandbLogger
from utils.evaluation.prophesee.visualize.vis_utils import LABELMAP_GEN1, LABELMAP_GEN4_SHORT, draw_bboxe... | 4,027 | 38.881188 | 106 | py |
RVT | RVT-master/models/layers/rnn.py | from typing import Optional, Tuple
import torch as th
import torch.nn as nn
class DWSConvLSTM2d(nn.Module):
"""LSTM with (depthwise-separable) Conv option in NCHW [channel-first] format.
"""
def __init__(self,
dim: int,
dws_conv: bool = True,
dws_conv_o... | 2,624 | 36.5 | 101 | py |
RVT | RVT-master/models/layers/maxvit/maxvit.py | """
Part of this code stems from rwightman's MaxVit implementation:
https://github.com/huggingface/pytorch-image-models/blob/1885bdc4318cc3be459981ea1a26cd862220864d/timm/models/maxxvit.py
that is:
- LayerScale
- PartitionAttentionCl
- window*
- grid*
- SelfAttentionCl
"""
from enum import Enum, auto
from functools im... | 14,829 | 37.519481 | 143 | py |
RVT | RVT-master/models/layers/maxvit/layers/split_batchnorm.py | """ Split BatchNorm
A PyTorch BatchNorm layer that splits input batch into N equal parts and passes each through
a separate BN layer. The first split is passed through the parent BN layers with weight/bias
keys the same as the original BN. All other splits pass through BN sub-layers under the '.aux_bn'
namespace.
Thi... | 3,441 | 44.289474 | 118 | py |
RVT | RVT-master/models/layers/maxvit/layers/halo_attn.py | """ Halo Self Attention
Paper: `Scaling Local Self-Attention for Parameter Efficient Visual Backbones`
- https://arxiv.org/abs/2103.12731
@misc{2103.12731,
Author = {Ashish Vaswani and Prajit Ramachandran and Aravind Srinivas and Niki Parmar and Blake Hechtman and
Jonathon Shlens},
Title = {Scaling Local Self... | 10,662 | 44.568376 | 117 | py |
RVT | RVT-master/models/layers/maxvit/layers/blur_pool.py | """
BlurPool layer inspired by
- Kornia's Max_BlurPool2d
- Making Convolutional Networks Shift-Invariant Again :cite:`zhang2019shiftinvar`
Hacked together by Chris Ha and Ross Wightman
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from .padding import get_padding
class ... | 1,594 | 36.093023 | 106 | py |
RVT | RVT-master/models/layers/maxvit/layers/separable_conv.py | """ Depthwise Separable Conv Modules
Basic DWS convs. Other variations of DWS exist with batch norm or activations between the
DW and PW convs such as the Depthwise modules in MobileNetV2 / EfficientNet and Xception.
Hacked together by / Copyright 2020 Ross Wightman
"""
from torch import nn as nn
from .create_conv2d... | 2,620 | 33.038961 | 110 | py |
RVT | RVT-master/models/layers/maxvit/layers/ml_decoder.py | from typing import Optional
import torch
from torch import nn
from torch import nn, Tensor
from torch.nn.modules.transformer import _get_activation_fn
def add_ml_decoder_head(model):
if hasattr(model, 'global_pool') and hasattr(model, 'fc'): # most CNN models, like Resnet50
model.global_pool = nn.Identi... | 7,008 | 43.643312 | 111 | py |
RVT | RVT-master/models/layers/maxvit/layers/std_conv.py | """ Convolution with Weight Standardization (StdConv and ScaledStdConv)
StdConv:
@article{weightstandardization,
author = {Siyuan Qiao and Huiyu Wang and Chenxi Liu and Wei Shen and Alan Yuille},
title = {Weight Standardization},
journal = {arXiv preprint arXiv:1903.10520},
year = {2019},
}
Code:... | 5,887 | 42.940299 | 109 | py |
RVT | RVT-master/models/layers/maxvit/layers/pos_embed.py | import math
from typing import List, Tuple, Optional, Union
import torch
from torch import nn as nn
def pixel_freq_bands(
num_bands: int,
max_freq: float = 224.,
linear_bands: bool = True,
dtype: torch.dtype = torch.float32,
device: Optional[torch.device] = None,
):
if lin... | 7,140 | 33.331731 | 119 | py |
RVT | RVT-master/models/layers/maxvit/layers/create_norm.py | """ Norm Layer Factory
Create norm modules by string (to mirror create_act and creat_norm-act fns)
Copyright 2022 Ross Wightman
"""
import types
import functools
import torch.nn as nn
from .norm import GroupNorm, GroupNorm1, LayerNorm, LayerNorm2d
_NORM_MAP = dict(
batchnorm=nn.BatchNorm2d,
batchnorm2d=nn.... | 1,814 | 30.842105 | 90 | py |
RVT | RVT-master/models/layers/maxvit/layers/mixed_conv2d.py | """ PyTorch Mixed Convolution
Paper: MixConv: Mixed Depthwise Convolutional Kernels (https://arxiv.org/abs/1907.09595)
Hacked together by / Copyright 2020 Ross Wightman
"""
import torch
from torch import nn as nn
from .conv2d_same import create_conv2d_pad
def _split_channels(num_chan, num_groups):
split = [nu... | 1,843 | 34.461538 | 99 | py |
RVT | RVT-master/models/layers/maxvit/layers/weight_init.py | import torch
import math
import warnings
from torch.nn.init import _calculate_fan_in_and_fan_out
def _trunc_normal_(tensor, mean, std, a, b):
# Cut & paste from PyTorch official master until it's in a few official releases - RW
# Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_no... | 4,765 | 36.825397 | 93 | py |
RVT | RVT-master/models/layers/maxvit/layers/attention_pool2d.py | """ Attention Pool 2D
Implementations of 2D spatial feature pooling using multi-head attention instead of average pool.
Based on idea in CLIP by OpenAI, licensed Apache 2.0
https://github.com/openai/CLIP/blob/3b473b0e682c091a9e53623eebc1ca1657385717/clip/model.py
Hacked together by / Copyright 2021 Ross Wightman
"""... | 4,927 | 36.333333 | 107 | py |
RVT | RVT-master/models/layers/maxvit/layers/squeeze_excite.py | """ Squeeze-and-Excitation Channel Attention
An SE implementation originally based on PyTorch SE-Net impl.
Has since evolved with additional functionality / configuration.
Paper: `Squeeze-and-Excitation Networks` - https://arxiv.org/abs/1709.01507
Also included is Effective Squeeze-Excitation (ESE).
Paper: `CenterMa... | 3,029 | 39.4 | 102 | py |
RVT | RVT-master/models/layers/maxvit/layers/norm.py | """ Normalization layers and wrappers
Norm layer definitions that support fast norm and consistent channel arg order (always first arg).
Hacked together by / Copyright 2022 Ross Wightman
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from .fast_norm import is_fast_norm, fast_group_norm, fast... | 4,517 | 37.288136 | 115 | py |
RVT | RVT-master/models/layers/maxvit/layers/non_local_attn.py | """ Bilinear-Attention-Transform and Non-Local Attention
Paper: `Non-Local Neural Networks With Grouped Bilinear Attentional Transforms`
- https://openaccess.thecvf.com/content_CVPR_2020/html/Chi_Non-Local_Neural_Networks_With_Grouped_Bilinear_Attentional_Transforms_CVPR_2020_paper.html
Adapted from original code:... | 6,218 | 41.59589 | 154 | py |
RVT | RVT-master/models/layers/maxvit/layers/evo_norm.py | """ EvoNorm in PyTorch
Based on `Evolving Normalization-Activation Layers` - https://arxiv.org/abs/2004.02967
@inproceedings{NEURIPS2020,
author = {Liu, Hanxiao and Brock, Andy and Simonyan, Karen and Le, Quoc},
booktitle = {Advances in Neural Information Processing Systems},
editor = {H. Larochelle and M. Ranzato ... | 13,862 | 38.271955 | 114 | py |
RVT | RVT-master/models/layers/maxvit/layers/pool2d_same.py | """ AvgPool2d w/ Same Padding
Hacked together by / Copyright 2020 Ross Wightman
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import List, Tuple, Optional
from .helpers import to_2tuple
from .padding import pad_same, get_padding_value
def avg_pool2d_same(x, kernel_size: List[int... | 3,045 | 40.162162 | 106 | py |
RVT | RVT-master/models/layers/maxvit/layers/create_act.py | """ Activation Factory
Hacked together by / Copyright 2020 Ross Wightman
"""
from typing import Union, Callable, Type
from .activations import *
from .activations_jit import *
from .activations_me import *
from .config import is_exportable, is_scriptable, is_no_jit
# PyTorch has an optimized, native 'silu' (aka 'swis... | 5,245 | 32.845161 | 105 | py |
RVT | RVT-master/models/layers/maxvit/layers/fast_norm.py | """ 'Fast' Normalization Functions
For GroupNorm and LayerNorm these functions bypass typical AMP upcast to float32.
Additionally, for LayerNorm, the APEX fused LN is used if available (which also does not upcast)
Hacked together by / Copyright 2022 Ross Wightman
"""
from typing import List, Optional
import torch
f... | 2,426 | 29.721519 | 96 | py |
RVT | RVT-master/models/layers/maxvit/layers/mlp.py | """ MLP module w/ dropout and configurable activation layer
Hacked together by / Copyright 2020 Ross Wightman
"""
from torch import nn as nn
from .helpers import to_2tuple
class Mlp(nn.Module):
""" MLP as used in Vision Transformer, MLP-Mixer and related networks
"""
def __init__(self, in_features, hidd... | 4,376 | 33.464567 | 119 | py |
RVT | RVT-master/models/layers/maxvit/layers/trace_utils.py | try:
from torch import _assert
except ImportError:
def _assert(condition: bool, message: str):
assert condition, message
def _float_to_int(x: float) -> int:
"""
Symbolic tracing helper to substitute for inbuilt `int`.
Hint: Inbuilt `int` can't accept an argument of type `Proxy`
"""
... | 335 | 23 | 64 | py |
RVT | RVT-master/models/layers/maxvit/layers/bottleneck_attn.py | """ Bottleneck Self Attention (Bottleneck Transformers)
Paper: `Bottleneck Transformers for Visual Recognition` - https://arxiv.org/abs/2101.11605
@misc{2101.11605,
Author = {Aravind Srinivas and Tsung-Yi Lin and Niki Parmar and Jonathon Shlens and Pieter Abbeel and Ashish Vaswani},
Title = {Bottleneck Transformers f... | 6,895 | 42.64557 | 118 | py |
RVT | RVT-master/models/layers/maxvit/layers/classifier.py | """ Classifier head and layer factory
Hacked together by / Copyright 2020 Ross Wightman
"""
from torch import nn as nn
from torch.nn import functional as F
from .adaptive_avgmax_pool import SelectAdaptivePool2d
def _create_pool(num_features, num_classes, pool_type='avg', use_conv=False):
flatten_in_pool = not u... | 2,320 | 39.719298 | 111 | py |
RVT | RVT-master/models/layers/maxvit/layers/cond_conv2d.py | """ PyTorch Conditionally Parameterized Convolution (CondConv)
Paper: CondConv: Conditionally Parameterized Convolutions for Efficient Inference
(https://arxiv.org/abs/1904.04971)
Hacked together by / Copyright 2020 Ross Wightman
"""
import math
from functools import partial
import numpy as np
import torch
from torc... | 5,199 | 40.935484 | 119 | py |
RVT | RVT-master/models/layers/maxvit/layers/patch_embed.py | """ Image to Patch Embedding using Conv2d
A convolution based approach to patchifying a 2D image w/ embedding projection.
Based on the impl in https://github.com/google-research/vision_transformer
Hacked together by / Copyright 2020 Ross Wightman
"""
from torch import nn as nn
from .helpers import to_2tuple
from .t... | 1,490 | 36.275 | 110 | py |
RVT | RVT-master/models/layers/maxvit/layers/lambda_layer.py | """ Lambda Layer
Paper: `LambdaNetworks: Modeling Long-Range Interactions Without Attention`
- https://arxiv.org/abs/2102.08602
@misc{2102.08602,
Author = {Irwan Bello},
Title = {LambdaNetworks: Modeling Long-Range Interactions Without Attention},
Year = {2021},
}
Status:
This impl is a WIP. Code snippets in the... | 5,941 | 43.343284 | 118 | py |
RVT | RVT-master/models/layers/maxvit/layers/gather_excite.py | """ Gather-Excite Attention Block
Paper: `Gather-Excite: Exploiting Feature Context in CNNs` - https://arxiv.org/abs/1810.12348
Official code here, but it's only partial impl in Caffe: https://github.com/hujie-frank/GENet
I've tried to support all of the extent both w/ and w/o params. I don't believe I've seen anoth... | 3,824 | 41.032967 | 120 | py |
RVT | RVT-master/models/layers/maxvit/layers/conv2d_same.py | """ Conv2d w/ Same Padding
Hacked together by / Copyright 2020 Ross Wightman
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Tuple, Optional
from .padding import pad_same, get_padding_value
def conv2d_same(
x, weight: torch.Tensor, bias: Optional[torch.Tensor] = Non... | 1,490 | 33.674419 | 108 | py |
RVT | RVT-master/models/layers/maxvit/layers/adaptive_avgmax_pool.py | """ PyTorch selectable adaptive pooling
Adaptive pooling with the ability to select the type of pooling from:
* 'avg' - Average pooling
* 'max' - Max pooling
* 'avgmax' - Sum of average and max pooling re-scaled by 0.5
* 'avgmaxc' - Concatenation of average and max pooling along feature dim, doubles fea... | 3,890 | 31.697479 | 111 | py |
RVT | RVT-master/models/layers/maxvit/layers/conv_bn_act.py | """ Conv2d + BN + Act
Hacked together by / Copyright 2020 Ross Wightman
"""
import functools
from torch import nn as nn
from .create_conv2d import create_conv2d
from .create_norm_act import get_norm_act_layer
class ConvNormAct(nn.Module):
def __init__(
self, in_channels, out_channels, kernel_size=1,... | 3,188 | 34.831461 | 118 | py |
RVT | RVT-master/models/layers/maxvit/layers/linear.py | """ Linear layer (alternate definition)
"""
import torch
import torch.nn.functional as F
from torch import nn as nn
class Linear(nn.Linear):
r"""Applies a linear transformation to the incoming data: :math:`y = xA^T + b`
Wraps torch.nn.Linear to support AMP + torchscript usage by manually casting
weight &... | 743 | 36.2 | 89 | py |
RVT | RVT-master/models/layers/maxvit/layers/config.py | """ Model / Layer Config singleton state
"""
from typing import Any, Optional
__all__ = [
'is_exportable', 'is_scriptable', 'is_no_jit',
'set_exportable', 'set_scriptable', 'set_no_jit', 'set_layer_config'
]
# Set to True if prefer to have layers with no jit optimization (includes activations)
_NO_JIT = False... | 3,069 | 25.465517 | 102 | py |
RVT | RVT-master/models/layers/maxvit/layers/cbam.py | """ CBAM (sort-of) Attention
Experimental impl of CBAM: Convolutional Block Attention Module: https://arxiv.org/abs/1807.06521
WARNING: Results with these attention layers have been mixed. They can significantly reduce performance on
some tasks, especially fine-grained it seems. I may end up removing this impl.
Hack... | 4,426 | 38.176991 | 106 | py |
RVT | RVT-master/models/layers/maxvit/layers/activations_jit.py | """ Activations
A collection of jit-scripted activations fn and modules with a common interface so that they can
easily be swapped. All have an `inplace` arg even if not used.
All jit scripted activations are lacking in-place variations on purpose, scripted kernel fusion does not
currently work across in-place op bou... | 2,529 | 26.802198 | 107 | py |
RVT | RVT-master/models/layers/maxvit/layers/activations_me.py | """ Activations (memory-efficient w/ custom autograd)
A collection of activations fn and modules with a common interface so that they can
easily be swapped. All have an `inplace` arg even if not used.
These activations are not compatible with jit scripting or ONNX export of the model, please use either
the JIT or bas... | 5,886 | 25.881279 | 163 | py |
RVT | RVT-master/models/layers/maxvit/layers/split_attn.py | """ Split Attention Conv2d (for ResNeSt Models)
Paper: `ResNeSt: Split-Attention Networks` - /https://arxiv.org/abs/2004.08955
Adapted from original PyTorch impl at https://github.com/zhanghang1989/ResNeSt
Modified for torchscript compat, performance, and consistency with timm by Ross Wightman
"""
import torch
impor... | 3,076 | 35.2 | 106 | py |
RVT | RVT-master/models/layers/maxvit/layers/global_context.py | """ Global Context Attention Block
Paper: `GCNet: Non-local Networks Meet Squeeze-Excitation Networks and Beyond`
- https://arxiv.org/abs/1904.11492
Official code consulted as reference: https://github.com/xvjiarui/GCNet
Hacked together by / Copyright 2021 Ross Wightman
"""
from torch import nn as nn
import torc... | 2,445 | 34.970588 | 105 | py |
RVT | RVT-master/models/layers/maxvit/layers/activations.py | """ Activations
A collection of activations fn and modules with a common interface so that they can
easily be swapped. All have an `inplace` arg even if not used.
Hacked together by / Copyright 2020 Ross Wightman
"""
import torch
from torch import nn as nn
from torch.nn import functional as F
def swish(x, inplace:... | 4,040 | 26.678082 | 107 | py |
RVT | RVT-master/models/layers/maxvit/layers/eca.py | """
ECA module from ECAnet
paper: ECA-Net: Efficient Channel Attention for Deep Convolutional Neural Networks
https://arxiv.org/abs/1910.03151
Original ECA model borrowed from https://github.com/BangguWu/ECANet
Modified circular ECA implementation and adaption for use in timm package
by Chris Ha https://github.com/V... | 6,386 | 42.746575 | 108 | py |
RVT | RVT-master/models/layers/maxvit/layers/space_to_depth.py | import torch
import torch.nn as nn
class SpaceToDepth(nn.Module):
def __init__(self, block_size=4):
super().__init__()
assert block_size == 4
self.bs = block_size
def forward(self, x):
N, C, H, W = x.size()
x = x.view(N, C, H // self.bs, self.bs, W // self.bs, self.bs)... | 1,750 | 31.425926 | 102 | py |
RVT | RVT-master/models/layers/maxvit/layers/create_attn.py | """ Attention Factory
Hacked together by / Copyright 2021 Ross Wightman
"""
import torch
from functools import partial
from .bottleneck_attn import BottleneckAttn
from .cbam import CbamModule, LightCbamModule
from .eca import EcaModule, CecaModule
from .gather_excite import GatherExcite
from .global_context import Gl... | 3,514 | 38.055556 | 109 | py |
RVT | RVT-master/models/layers/maxvit/layers/median_pool.py | """ Median Pool
Hacked together by / Copyright 2020 Ross Wightman
"""
import torch.nn as nn
import torch.nn.functional as F
from .helpers import to_2tuple, to_4tuple
class MedianPool2d(nn.Module):
""" Median pool (usable as median filter when stride=1) module.
Args:
kernel_size: size of pooling kern... | 1,737 | 33.76 | 87 | py |
RVT | RVT-master/models/layers/maxvit/layers/test_time_pool.py | """ Test Time Pooling (Average-Max Pool)
Hacked together by / Copyright 2020 Ross Wightman
"""
import logging
from torch import nn
import torch.nn.functional as F
from .adaptive_avgmax_pool import adaptive_avgmax_pool2d
_logger = logging.getLogger(__name__)
class TestTimePoolHead(nn.Module):
def __init__(sel... | 1,996 | 36.679245 | 101 | py |
RVT | RVT-master/models/layers/maxvit/layers/selective_kernel.py | """ Selective Kernel Convolution/Attention
Paper: Selective Kernel Networks (https://arxiv.org/abs/1903.06586)
Hacked together by / Copyright 2020 Ross Wightman
"""
import torch
from torch import nn as nn
from .conv_bn_act import ConvNormActAa
from .helpers import make_divisible
from .trace_utils import _assert
de... | 5,387 | 43.9 | 116 | py |
RVT | RVT-master/models/layers/maxvit/layers/norm_act.py | """ Normalization + Activation Layers
Provides Norm+Act fns for standard PyTorch norm layers such as
* BatchNorm
* GroupNorm
* LayerNorm
This allows swapping with alternative layers that are natively both norm + act such as
* EvoNorm (evo_norm.py)
* FilterResponseNorm (filter_response_norm.py)
* InplaceABN (inplace_a... | 10,397 | 40.098814 | 115 | py |
RVT | RVT-master/models/layers/maxvit/layers/filter_response_norm.py | """ Filter Response Norm in PyTorch
Based on `Filter Response Normalization Layer` - https://arxiv.org/abs/1911.09737
Hacked together by / Copyright 2021 Ross Wightman
"""
import torch
import torch.nn as nn
from .create_act import create_act_layer
from .trace_utils import _assert
def inv_instance_rms(x, eps: float... | 2,540 | 35.826087 | 111 | py |
RVT | RVT-master/models/layers/maxvit/layers/create_conv2d.py | """ Create Conv2d Factory Method
Hacked together by / Copyright 2020 Ross Wightman
"""
from .mixed_conv2d import MixedConv2d
from .cond_conv2d import CondConv2d
from .conv2d_same import create_conv2d_pad
def create_conv2d(in_channels, out_channels, kernel_size, **kwargs):
""" Select a 2d convolution implementat... | 1,622 | 42.864865 | 101 | py |
RVT | RVT-master/models/layers/maxvit/layers/create_norm_act.py | """ NormAct (Normalizaiton + Activation Layer) Factory
Create norm + act combo modules that attempt to be backwards compatible with separate norm + act
isntances in models. Where these are used it will be possible to swap separate BN + act layers with
combined modules like IABN or EvoNorms.
Hacked together by / Copyr... | 3,748 | 39.75 | 106 | py |
RVT | RVT-master/models/layers/maxvit/layers/padding.py | """ Padding Helpers
Hacked together by / Copyright 2020 Ross Wightman
"""
import math
from typing import List, Tuple
import torch.nn.functional as F
# Calculate symmetric padding for a convolution
def get_padding(kernel_size: int, stride: int = 1, dilation: int = 1, **_) -> int:
padding = ((stride - 1) + dilati... | 2,167 | 37.035088 | 99 | py |
RVT | RVT-master/models/layers/maxvit/layers/drop.py | """ DropBlock, DropPath
PyTorch implementations of DropBlock and DropPath (Stochastic Depth) regularization layers.
Papers:
DropBlock: A regularization method for convolutional networks (https://arxiv.org/abs/1810.12890)
Deep Networks with Stochastic Depth (https://arxiv.org/abs/1603.09382)
Code:
DropBlock impl ins... | 6,872 | 39.429412 | 118 | py |
RVT | RVT-master/models/layers/maxvit/layers/inplace_abn.py | import torch
from torch import nn as nn
try:
from inplace_abn.functions import inplace_abn, inplace_abn_sync
has_iabn = True
except ImportError:
has_iabn = False
def inplace_abn(x, weight, bias, running_mean, running_var,
training=True, momentum=0.1, eps=1e-05, activation="leaky_re... | 3,374 | 37.352273 | 111 | py |
RVT | RVT-master/models/detection/yolox/models/losses.py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright (c) Megvii Inc. All rights reserved.
import torch
import torch.nn as nn
class IOUloss(nn.Module):
def __init__(self, reduction="none", loss_type="iou"):
super(IOUloss, self).__init__()
self.reduction = reduction
self.loss_type = ... | 1,729 | 29.892857 | 84 | py |
RVT | RVT-master/models/detection/yolox/models/yolo_head.py | """
Original Yolox Head code with slight modifications
"""
import math
from typing import Dict, Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
try:
from torch import compile as th_compile
except ImportError:
th_compile = None
from models.detection.yolox.utils import bboxes_iou
fr... | 22,240 | 35.640857 | 93 | py |
RVT | RVT-master/models/detection/yolox/models/network_blocks.py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright (c) Megvii Inc. All rights reserved.
import torch
import torch.nn as nn
class SiLU(nn.Module):
"""export-friendly version of nn.SiLU()"""
@staticmethod
def forward(x):
return x * torch.sigmoid(x)
def get_activation(name="silu", inplac... | 3,959 | 26.887324 | 88 | py |
RVT | RVT-master/models/detection/yolox/utils/compat.py | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
import torch
_TORCH_VER = [int(x) for x in torch.__version__.split(".")[:2]]
__all__ = ["meshgrid"]
def meshgrid(*tensors):
if _TORCH_VER >= [1, 10]:
return torch.meshgrid(*tensors, indexing="ij")
else:
return torch.meshgrid(*tensors)
| 310 | 18.4375 | 63 | py |
RVT | RVT-master/models/detection/yolox/utils/boxes.py | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) Megvii Inc. All rights reserved.
import numpy as np
import torch
import torchvision
__all__ = [
"filter_box",
"postprocess",
"bboxes_iou",
"matrix_iou",
"adjust_box_anns",
"xyxy2xywh",
"xyxy2cxcywh",
]
def filter_box(output, ... | 4,471 | 31.882353 | 94 | py |
RVT | RVT-master/models/detection/recurrent_backbone/base.py | from typing import Tuple
import torch.nn as nn
class BaseDetector(nn.Module):
def get_stage_dims(self, stages: Tuple[int, ...]) -> Tuple[int, ...]:
raise NotImplementedError
def get_strides(self, stages: Tuple[int, ...]) -> Tuple[int, ...]:
raise NotImplementedError
| 295 | 23.666667 | 73 | py |
RVT | RVT-master/models/detection/recurrent_backbone/maxvit_rnn.py | from typing import Dict, Optional, Tuple
import torch as th
import torch.nn as nn
from omegaconf import DictConfig, OmegaConf
try:
from torch import compile as th_compile
except ImportError:
th_compile = None
from data.utils.types import FeatureMap, BackboneFeatures, LstmState, LstmStates
from models.layers.... | 8,012 | 42.786885 | 119 | py |
RVT | RVT-master/models/detection/yolox_extension/models/detector.py | from typing import Dict, Optional, Tuple, Union
import torch as th
from omegaconf import DictConfig
try:
from torch import compile as th_compile
except ImportError:
th_compile = None
from ...recurrent_backbone import build_recurrent_backbone
from .build import build_yolox_fpn, build_yolox_head
from utils.tim... | 2,962 | 39.589041 | 99 | py |
RVT | RVT-master/models/detection/yolox_extension/models/yolo_pafpn.py | """
Original Yolox PAFPN code with slight modifications
"""
from typing import Dict, Optional, Tuple
import torch as th
import torch.nn as nn
try:
from torch import compile as th_compile
except ImportError:
th_compile = None
from ...yolox.models.network_blocks import BaseConv, CSPLayer, DWConv
from data.util... | 4,412 | 30.521429 | 100 | py |
RVT | RVT-master/scripts/genx/preprocess_dataset.py | import os
os.environ["OMP_NUM_THREADS"] = "1"
os.environ["OPENBLAS_NUM_THREADS"] = "1"
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["VECLIB_MAXIMUM_THREADS"] = "1"
os.environ["NUMEXPR_NUM_THREADS"] = "1"
from abc import ABC, abstractmethod
import argparse
from dataclasses import dataclass, field
from enum import En... | 32,576 | 39.72125 | 177 | py |
RVT | RVT-master/loggers/wandb_logger.py | """
This is a modified version of the Pytorch Lightning logger
"""
import time
from argparse import Namespace
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
from weakref import ReferenceType
import numpy as np
import pytorch_lightning as pl
import torch
import torch.nn as nn
pl_is_ge_1_... | 16,348 | 42.366048 | 159 | py |
RVT | RVT-master/utils/timers.py | import atexit
import time
from functools import wraps
import numpy as np
import torch
cuda_timers = {}
timers = {}
class CudaTimer:
def __init__(self, device: torch.device, timer_name: str):
assert isinstance(device, torch.device)
assert isinstance(timer_name, str)
self.timer_name = time... | 2,708 | 27.21875 | 120 | py |
RVT | RVT-master/utils/helpers.py | from typing import Union
import torch as th
def torch_uniform_sample_scalar(min_value: float, max_value: float):
assert max_value >= min_value, f'{max_value=} is smaller than {min_value=}'
if max_value == min_value:
return min_value
return min_value + (max_value - min_value) * th.rand(1).item()
... | 461 | 29.8 | 93 | py |
RVT | RVT-master/utils/padding.py | from typing import Any, List, Tuple
import torch as th
import torch.nn.functional as F
class InputPadderFromShape:
def __init__(self, desired_hw: Tuple[int, int], mode: str = 'constant', value: int = 0, type: str = 'corner'):
"""
:param desired_hw: Desired height and width
:param mode: Se... | 2,476 | 36.530303 | 114 | py |
RVT | RVT-master/utils/evaluation/prophesee/io/box_loading.py | """
Defines some tools to handle events.
In particular :
-> defines events' types
-> defines functions to read events from binary .dat files using numpy
-> defines functions to write events to binary .dat files using numpy
Copyright: (c) 2019-2020 Prophesee
"""
from __future__ import print_function
from t... | 4,375 | 42.76 | 146 | py |
RVT | RVT-master/data/genx_utils/dataset_streaming.py | from functools import partialmethod
from pathlib import Path
from typing import List, Union
from omegaconf import DictConfig
from torchdata.datapipes.map import MapDataPipe
from tqdm import tqdm
from data.genx_utils.sequence_for_streaming import SequenceForIter, RandAugmentIterDataPipe
from data.utils.stream_concat_d... | 4,570 | 42.122642 | 119 | py |
RVT | RVT-master/data/genx_utils/labels.py | from __future__ import annotations
from typing import List, Tuple, Union, Optional
import math
import numpy as np
import torch as th
from einops import rearrange
from torch.nn.functional import pad
class ObjectLabelBase:
_str2idx = {
't': 0,
'x': 1,
'y': 2,
'w': 3,
'h': 4... | 17,557 | 37.086768 | 115 | py |
RVT | RVT-master/data/genx_utils/sequence_rnd.py | from pathlib import Path
from data.genx_utils.labels import SparselyBatchedObjectLabels
from data.genx_utils.sequence_base import SequenceBase
from data.utils.types import DataType, DatasetType, LoaderDataDictGenX
from utils.timers import TimerDummy as Timer
class SequenceForRandomAccess(SequenceBase):
def __ini... | 3,324 | 38.117647 | 96 | py |
RVT | RVT-master/data/genx_utils/dataset_rnd.py | from collections import namedtuple
from collections.abc import Iterable
from pathlib import Path
from typing import List
import numpy as np
from omegaconf import DictConfig
from torch.utils.data import ConcatDataset, Dataset
from torch.utils.data.sampler import WeightedRandomSampler
from tqdm import tqdm
from data.ge... | 6,088 | 39.593333 | 114 | py |
RVT | RVT-master/data/genx_utils/sequence_for_streaming.py | from pathlib import Path
from typing import List, Optional, Union, Tuple
import h5py
import numpy as np
import torch
from omegaconf import DictConfig
from torchdata.datapipes.iter import IterDataPipe
from data.genx_utils.labels import SparselyBatchedObjectLabels
from data.genx_utils.sequence_base import SequenceBase,... | 8,541 | 40.668293 | 120 | py |
RVT | RVT-master/data/genx_utils/collate.py | from copy import deepcopy
from typing import Any, Callable, Dict, Optional, Type, Tuple, Union
import torch
from data.genx_utils.collate_from_pytorch import collate, default_collate_fn_map
from data.genx_utils.labels import ObjectLabels, SparselyBatchedObjectLabels
def collate_object_labels(batch, *, collate_fn_map... | 1,631 | 32.306122 | 120 | py |
RVT | RVT-master/data/genx_utils/sequence_base.py | from pathlib import Path
from typing import Any, List, Optional
import h5py
import numpy as np
import torch
from torchdata.datapipes.map import MapDataPipe
from data.genx_utils.labels import ObjectLabelFactory, ObjectLabels
from data.utils.spatial import get_original_hw
from data.utils.types import DatasetType
from u... | 4,259 | 39.571429 | 108 | py |
RVT | RVT-master/data/genx_utils/collate_from_pytorch.py | import collections
import contextlib
import re
import torch
torch_is_version_1 = int(torch.__version__.split('.')[0]) == 1
from typing import Callable, Dict, Optional, Tuple, Type, Union
np_str_obj_array_pattern = re.compile(r'[SaUO]')
default_collate_err_msg_format = (
"default_collate: batch must contain ten... | 6,827 | 45.767123 | 118 | py |
RVT | RVT-master/data/utils/augmentor.py | import collections.abc as abc
from dataclasses import dataclass
from typing import Any, Optional, Tuple, Union
from warnings import filterwarnings, warn
import torch as th
import torch.distributions.categorical
from omegaconf import DictConfig
from torch.nn.functional import interpolate
from torchvision.transforms imp... | 21,852 | 47.670379 | 121 | py |
RVT | RVT-master/data/utils/stream_sharded_datapipe.py | from typing import Any, List, Optional
import torch
import torch.distributed as dist
from torch.utils.data import DataLoader
from torchdata.datapipes.iter import Concater, IterableWrapper, IterDataPipe, ZipperLongest
from torchdata.datapipes.map import MapDataPipe
class ShardedStreamingDataPipe(IterDataPipe):
de... | 5,069 | 52.368421 | 118 | py |
RVT | RVT-master/data/utils/types.py | from enum import auto, Enum
try:
from enum import StrEnum
except ImportError:
from strenum import StrEnum
from typing import Dict, List, Optional, Tuple, Union
import torch as th
from data.genx_utils.labels import ObjectLabels, SparselyBatchedObjectLabels
class DataType(Enum):
EV_REPR = auto()
FLOW... | 1,118 | 18.982143 | 114 | py |
RVT | RVT-master/data/utils/stream_concat_datapipe.py | from typing import Any, Iterator, List, Optional, Type
import torch as th
import torch.distributed as dist
from torch.utils.data import DataLoader
from torchdata.datapipes.iter import (
Concater,
IterableWrapper,
IterDataPipe,
Zipper,
)
from torchdata.datapipes.map import MapDataPipe
class DummyIterD... | 4,194 | 39.336538 | 115 | py |
RVT | RVT-master/data/utils/representations.py | from abc import ABC, abstractmethod
from typing import Optional, Tuple
import math
import numpy as np
import torch as th
class RepresentationBase(ABC):
@abstractmethod
def construct(self, x: th.Tensor, y: th.Tensor, pol: th.Tensor, time: th.Tensor) -> th.Tensor:
...
@abstractmethod
def get_s... | 7,632 | 33.853881 | 120 | py |
scSemiCluster | scSemiCluster-master/codes/scSemiCluster_model.py | import math
import random
import tensorflow as tf
import keras.backend as K
from keras.layers import GaussianNoise, Dense, Activation
from scSemiCluster_preprocess import *
from sklearn.cluster import KMeans
from sklearn.metrics import adjusted_rand_score
import os
import time
MeanAct = lambda x: tf.clip_by_value(K.e... | 20,744 | 45.202673 | 153 | py |
LAVA | LAVA-main/lava.py | import torch
import torchvision
import otdd
from otdd.pytorch.datasets import load_imagenet, load_torchvision_data, load_torchvision_data_shuffle, load_torchvision_data_perturb, load_torchvision_data_keepclean
from otdd.pytorch.distance_fast import DatasetDistance, FeatureCost
import matplotlib.pyplot as plt
from to... | 15,049 | 32.668904 | 165 | py |
LAVA | LAVA-main/setup.py | from setuptools import find_packages, setup
setup(
name='lava',
version='2.6.22',
description='LAVA-model-agnostic-data-valuation',
author='anonymous',
license='MIT',
packages=find_packages(),
install_requires=[
'numpy',
'scipy',
'matplotlib',
'tqdm',
'pot',
... | 578 | 17.677419 | 53 | py |
LAVA | LAVA-main/vgg.py | import math
import torch.nn as nn
import torch.nn.init as init
__all__ = [
'VGG', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn',
'vgg19_bn', 'vgg19',
]
class VGG(nn.Module):
'''
VGG model
'''
def __init__(self, features):
super(VGG, self).__init__()
self.feat... | 2,901 | 26.638095 | 98 | py |
LAVA | LAVA-main/poi_util.py | import numpy as np
# import matplotlib.pyplot as plt
import random
import imageio
import torch.nn as nn
def normalization(data):
_range = np.max(data) - np.min(data)
return (data - np.min(data)) / _range
def patching(clean_sample, attack, pert=None, intensity = 1, dataset_nm = 'CIFAR'):
'''
this code ... | 8,945 | 40.803738 | 132 | py |
LAVA | LAVA-main/preact_resnet.py | '''Pre-activation ResNet in PyTorch.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Identity Mappings in Deep Residual Networks. arXiv:1603.05027
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class PreActBlock(nn.Module):
'''Pre-activation version of the BasicBlock.... | 4,078 | 33.277311 | 102 | py |
LAVA | LAVA-main/models/dla.py | '''DLA in PyTorch.
Reference:
Deep Layer Aggregation. https://arxiv.org/abs/1707.06484
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(BasicBlock, self).__init__()
sel... | 4,425 | 31.544118 | 83 | py |
LAVA | LAVA-main/models/shufflenetv2.py | '''ShuffleNetV2 in PyTorch.
See the paper "ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design" for more details.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class ShuffleBlock(nn.Module):
def __init__(self, groups=2):
super(ShuffleBlock, self).__init__()
... | 5,530 | 32.932515 | 107 | py |
LAVA | LAVA-main/models/regnet.py | '''RegNet in PyTorch.
Paper: "Designing Network Design Spaces".
Reference: https://github.com/keras-team/keras-applications/blob/master/keras_applications/efficientnet.py
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class SE(nn.Module):
'''Squeeze-and-Excitation block.'''
def __in... | 4,548 | 28.160256 | 106 | py |
LAVA | LAVA-main/models/efficientnet.py | '''EfficientNet in PyTorch.
Paper: "EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks".
Reference: https://github.com/keras-team/keras-applications/blob/master/keras_applications/efficientnet.py
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
def swish(x):
return x ... | 5,721 | 31.511364 | 106 | py |
LAVA | LAVA-main/models/pnasnet.py | '''PNASNet in PyTorch.
Paper: Progressive Neural Architecture Search
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class SepConv(nn.Module):
'''Separable Convolution.'''
def __init__(self, in_planes, out_planes, kernel_size, stride):
super(SepConv, self).__init__()
se... | 4,258 | 32.801587 | 105 | py |
LAVA | LAVA-main/models/resnet.py | '''ResNet in PyTorch.
For Pre-activation ResNet, see 'preact_resnet.py'.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
expansi... | 4,461 | 30.64539 | 83 | py |
LAVA | LAVA-main/models/dla_simple.py | '''Simplified version of DLA in PyTorch.
Note this implementation is not identical to the original paper version.
But it seems works fine.
See dla.py for the original paper version.
Reference:
Deep Layer Aggregation. https://arxiv.org/abs/1707.06484
'''
import torch
import torch.nn as nn
import torch.nn.function... | 4,084 | 30.666667 | 83 | py |
LAVA | LAVA-main/models/mobilenetv2.py | '''MobileNetV2 in PyTorch.
See the paper "Inverted Residuals and Linear Bottlenecks:
Mobile Networks for Classification, Detection and Segmentation" for more details.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class Block(nn.Module):
'''expand + depthwise + pointwise'''
def __init... | 3,092 | 34.551724 | 114 | py |
LAVA | LAVA-main/models/vgg.py | '''VGG11/13/16/19 in Pytorch.'''
import torch
import torch.nn as nn
cfg = {
'VGG11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'VGG13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'VGG16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512... | 1,442 | 29.0625 | 117 | py |
LAVA | LAVA-main/models/densenet.py | '''DenseNet in PyTorch.'''
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class Bottleneck(nn.Module):
def __init__(self, in_planes, growth_rate):
super(Bottleneck, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.conv1 = nn.Conv2d(in_planes, 4*gr... | 3,542 | 31.805556 | 96 | py |
LAVA | LAVA-main/models/preact_resnet.py | '''Pre-activation ResNet in PyTorch.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Identity Mappings in Deep Residual Networks. arXiv:1603.05027
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class PreActBlock(nn.Module):
'''Pre-activation version of the BasicBlock.... | 4,079 | 33.285714 | 102 | py |
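
Each row pairs a source file with simple size statistics. Below is a minimal sketch of how such columns could be recomputed for one file, assuming `file_length` is the file's character count, `avg_line_length` the mean line length in characters, and `max_line_length` the longest line; the helper name `file_stats` is hypothetical and is not part of the dataset's actual pipeline.

```python
# Hypothetical helper: recomputes the stats columns (file_length, avg_line_length,
# max_line_length, extension_type) for one source file, under the assumption that
# they are plain character counts. Illustration only, not the dataset's real code.
from pathlib import Path


def file_stats(path: str) -> dict:
    code = Path(path).read_text(encoding="utf-8", errors="replace")
    lines = code.splitlines() or [""]
    return {
        "file": path,
        "code": code,
        "file_length": len(code),                                     # total characters
        "avg_line_length": sum(len(l) for l in lines) / len(lines),   # mean chars per line
        "max_line_length": max(len(l) for l in lines),                # longest line
        "extension_type": Path(path).suffix.lstrip("."),              # e.g. "py"
    }


# Example usage (path shown for illustration):
# stats = file_stats("RVT-master/callbacks/gradflow.py")
```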