repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
GLIP | GLIP-main/maskrcnn_benchmark/data/datasets/coco.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import os
import os.path
import math
from PIL import Image, ImageDraw
import random
import numpy as np
import torch
import torchvision
import torch.utils.data as data
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_b... | 10,337 | 37.431227 | 110 | py |
GLIP | GLIP-main/maskrcnn_benchmark/data/datasets/od_to_grounding.py | import numpy as np
import random
import re
import torch
import pdb
import logging
def clean_name(name):
name = re.sub(r"\(.*\)", "", name)
name = re.sub(r"_", " ", name)
name = re.sub(r" ", " ", name)
return name
def sanity_check_target_after_processing(target):
assert(len(target.bbox) == len(t... | 14,306 | 37.050532 | 251 | py |
GLIP | GLIP-main/maskrcnn_benchmark/data/datasets/duplicate_dataset.py | import math
from typing import TypeVar, Optional, Iterator
import torch
from torch.utils.data import Sampler, Dataset
import torch.distributed as dist
import random
import numpy as np
def create_duplicate_dataset(DatasetBaseClass):
class DupDataset(DatasetBaseClass):
def __init__(self, copy, **kwargs):
... | 863 | 26 | 67 | py |
GLIP | GLIP-main/maskrcnn_benchmark/data/datasets/evaluation/box_aug.py | import torch
import numpy as np
from maskrcnn_benchmark.config import cfg
from maskrcnn_benchmark.data import transforms as T
from maskrcnn_benchmark.structures.image_list import to_image_list
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.structures.boxlist_ops import cat_boxli... | 13,285 | 37.068768 | 128 | py |
GLIP | GLIP-main/maskrcnn_benchmark/data/datasets/evaluation/vg/vg_eval.py | # A modification version from chainercv repository.
# (See https://github.com/chainer/chainercv/blob/master/chainercv/evaluations/eval_detection_voc.py)
from __future__ import division
import os
from collections import OrderedDict
import numpy as np
import torch
from maskrcnn_benchmark.structures.bounding_box import B... | 26,747 | 38.744428 | 121 | py |
GLIP | GLIP-main/maskrcnn_benchmark/data/datasets/evaluation/lvis/lvis_eval.py | # Copyright (c) Aishwarya Kamath & Nicolas Carion. Licensed under the Apache License 2.0. All Rights Reserved
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import copy
import datetime
import json
import os
from collections import OrderedDict, defaultdict
import numpy as np
import pycocotools.m... | 37,466 | 36.542084 | 137 | py |
GLIP | GLIP-main/maskrcnn_benchmark/data/datasets/evaluation/lvis/lvis.py | # Copyright (c) Aishwarya Kamath & Nicolas Carion. Licensed under the Apache License 2.0. All Rights Reserved
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import json
import os
import time
from collections import defaultdict
import pycocotools.mask as mask_utils
import torchvision
from PIL im... | 6,625 | 30.855769 | 116 | py |
GLIP | GLIP-main/maskrcnn_benchmark/data/datasets/evaluation/od_to_grounding/od_eval.py | import logging
import tempfile
import os
import torch
import numpy as np
import json
from collections import OrderedDict
from tqdm import tqdm
from maskrcnn_benchmark.modeling.roi_heads.mask_head.inference import Masker
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.structures.... | 19,180 | 34.986867 | 92 | py |
GLIP | GLIP-main/maskrcnn_benchmark/data/datasets/evaluation/coco/coco_eval.py | import logging
import tempfile
import os
import torch
import numpy as np
import json
from collections import OrderedDict
from tqdm import tqdm
from maskrcnn_benchmark.modeling.roi_heads.mask_head.inference import Masker
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.structures.... | 19,181 | 35.056391 | 92 | py |
GLIP | GLIP-main/maskrcnn_benchmark/data/datasets/evaluation/flickr/flickr_eval.py | from maskrcnn_benchmark.structures.boxlist_ops import boxlist_iou
from maskrcnn_benchmark.structures.bounding_box import BoxList
import json
import numpy as np
import os.path as osp
import os
from prettytable import PrettyTable
import xml.etree.ElementTree as ET
from collections import defaultdict
from pathlib import ... | 16,552 | 36.535147 | 129 | py |
GLIP | GLIP-main/maskrcnn_benchmark/data/samplers/grouped_batch_sampler.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import itertools
import torch
from torch.utils.data.sampler import BatchSampler
from torch.utils.data.sampler import Sampler
class GroupedBatchSampler(BatchSampler):
"""
Wraps another sampler to yield a mini-batch of indices.
It enfo... | 4,845 | 40.775862 | 88 | py |
GLIP | GLIP-main/maskrcnn_benchmark/data/samplers/iteration_based_batch_sampler.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from torch.utils.data.sampler import BatchSampler
class IterationBasedBatchSampler(BatchSampler):
"""
Wraps a BatchSampler, resampling from it until
a specified number of iterations have been sampled
"""
def __init__(self, ba... | 1,164 | 35.40625 | 71 | py |
GLIP | GLIP-main/maskrcnn_benchmark/data/samplers/distributed.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# Code is copy-pasted exactly as in torch.utils.data.distributed.
# FIXME remove this once c10d fixes the bug it has
import math
import torch
import torch.distributed as dist
from torch.utils.data.sampler import Sampler
from maskrcnn_benchmark.uti... | 2,794 | 37.287671 | 94 | py |
GLIP | GLIP-main/maskrcnn_benchmark/data/transforms/transforms.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import cv2
import random
import numpy as np
import math
import torch
import torchvision
from torchvision.transforms import functional as F
from maskrcnn_benchmark.structures.bounding_box import BoxList
def matrix_iou(a, b, relative=False):
""... | 14,010 | 35.297927 | 129 | py |
GLIP | GLIP-main/maskrcnn_benchmark/modeling/matcher.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
class Matcher(object):
"""
This class assigns to each predicted "element" (e.g., a box) a ground-truth
element. Each predicted element will have exactly zero or one matches; each
ground-truth element may be assigned t... | 5,323 | 44.896552 | 88 | py |
GLIP | GLIP-main/maskrcnn_benchmark/modeling/make_layers.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""
Miscellaneous utility functions
"""
import torch
from torch import nn
from torch.nn import functional as F
from maskrcnn_benchmark.config import cfg
from maskrcnn_benchmark.layers import Conv2d, DYReLU
from maskrcnn_benchmark.modeling.poolers ... | 3,705 | 28.648 | 79 | py |
GLIP | GLIP-main/maskrcnn_benchmark/modeling/utils.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""
Miscellaneous utility functions
"""
import torch
def cat(tensors, dim=0):
"""
Efficient version of torch.cat that avoids a copy if there is only a single element in a list
"""
assert isinstance(tensors, (list, tuple))
if ... | 2,738 | 33.2375 | 97 | py |
GLIP | GLIP-main/maskrcnn_benchmark/modeling/poolers.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
import torch.nn.functional as F
from torch import nn
from maskrcnn_benchmark.layers import ROIAlign, ROIAlignV2
from .utils import cat
class LevelMapper(object):
"""Determine which FPN level each RoI in a set of RoIs should map... | 4,410 | 34.007937 | 90 | py |
GLIP | GLIP-main/maskrcnn_benchmark/modeling/balanced_positive_negative_sampler.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
class BalancedPositiveNegativeSampler(object):
"""
This class samples batches, ensuring that they contain a fixed proportion of positives
"""
def __init__(self, batch_size_per_image, positive_fraction):
"""
... | 2,716 | 38.376812 | 90 | py |
GLIP | GLIP-main/maskrcnn_benchmark/modeling/box_coder.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import math
import torch
class BoxCoder(object):
"""
This class encodes and decodes a set of bounding boxes into
the representation used for training the regressors.
"""
def __init__(self, weights, bbox_xform_clip=math.log(1... | 3,367 | 34.083333 | 86 | py |
GLIP | GLIP-main/maskrcnn_benchmark/modeling/backbone/efficientdet.py | import torch
import re
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import logging
import cv2
import math
import itertools
import collections
from torchvision.ops import nms
GlobalParams = collections.namedtuple('GlobalParams', [
'batch_norm_momentum', 'batch_norm_epsilon', 'dropout_ra... | 78,693 | 40.791822 | 118 | py |
GLIP | GLIP-main/maskrcnn_benchmark/modeling/backbone/bifpn.py | import torch.nn as nn
import torch
from maskrcnn_benchmark.layers import swish
class BiFPN(nn.Module):
def __init__(self, in_channels_list, out_channels, first_time=False, epsilon=1e-4, attention=True):
super(BiFPN, self).__init__()
self.epsilon = epsilon
# Conv layers
self.conv6_... | 11,837 | 42.362637 | 104 | py |
GLIP | GLIP-main/maskrcnn_benchmark/modeling/backbone/mixer.py | import torch
from torch import nn
class MixedOperationRandom(nn.Module):
def __init__(self, search_ops):
super(MixedOperationRandom, self).__init__()
self.ops = nn.ModuleList(search_ops)
self.num_ops = len(search_ops)
def forward(self, x, x_path=None):
if x_path is None:
... | 1,018 | 43.304348 | 118 | py |
GLIP | GLIP-main/maskrcnn_benchmark/modeling/backbone/efficientnet.py | """
EfficientNet for ImageNet-1K, implemented in PyTorch.
Original papers:
- 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946,
- 'Adversarial Examples Improve Image Recognition,' https://arxiv.org/abs/1911.09665.
"""
import os
import math
impo... | 22,448 | 31.440751 | 120 | py |
GLIP | GLIP-main/maskrcnn_benchmark/modeling/backbone/resnet.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""
Variant of the resnet module that takes cfg as an argument.
Example usage. Strings may be specified in the config file.
model = ResNet(
"StemWithFixedBatchNorm",
"BottleneckWithFixedBatchNorm",
"ResNet50StagesTo4",
... | 20,331 | 30.620529 | 90 | py |
GLIP | GLIP-main/maskrcnn_benchmark/modeling/backbone/swint_vl.py | # --------------------------------------------------------
# Swin Transformer
# modified from https://github.com/SwinTransformer/Swin-Transformer-Object-Detection/blob/master/mmdet/models/backbones/swin_transformer.py
# --------------------------------------------------------
import torch
import torch.nn as nn
import ... | 32,089 | 40.406452 | 139 | py |
GLIP | GLIP-main/maskrcnn_benchmark/modeling/backbone/fbnet.py | """
FBNet model builder
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import copy
import logging
import math
from collections import OrderedDict
import torch
import torch.nn as nn
from torch.nn import BatchNorm2d, SyncBatchNorm
from maskrcnn_benchmark.layers import Conv2d, in... | 15,344 | 27.628731 | 87 | py |
GLIP | GLIP-main/maskrcnn_benchmark/modeling/backbone/swint_v2.py | # --------------------------------------------------------
# Swin Transformer
# modified from https://github.com/SwinTransformer/Swin-Transformer-Object-Detection/blob/master/mmdet/models/backbones/swin_transformer.py
# --------------------------------------------------------
import torch
import torch.nn as nn
import ... | 28,474 | 37.794278 | 139 | py |
GLIP | GLIP-main/maskrcnn_benchmark/modeling/backbone/fpn.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
import torch.nn.functional as F
from torch import nn
class FPN(nn.Module):
"""
Module that adds FPN on top of a list of feature maps.
The feature maps are currently supposed to be in increasing depth
order, and must be... | 6,880 | 40.203593 | 121 | py |
GLIP | GLIP-main/maskrcnn_benchmark/modeling/backbone/__init__.py | from collections import OrderedDict
from torch import nn
from maskrcnn_benchmark.modeling import registry
from maskrcnn_benchmark.modeling.make_layers import conv_with_kaiming_uniform
from maskrcnn_benchmark.layers import DropBlock2D, DyHead
from . import fpn as fpn_module
from . import bifpn
from . import resnet
fro... | 8,813 | 35.725 | 119 | py |
GLIP | GLIP-main/maskrcnn_benchmark/modeling/backbone/ops.py | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
def conv7x7(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""7x7 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=7, stride=stride,
padding=3*dilation, groups=groups, bi... | 2,662 | 36.507042 | 86 | py |
GLIP | GLIP-main/maskrcnn_benchmark/modeling/backbone/swint.py | # --------------------------------------------------------
# Swin Transformer
# modified from https://github.com/SwinTransformer/Swin-Transformer-Object-Detection/blob/master/mmdet/models/backbones/swin_transformer.py
# --------------------------------------------------------
import torch
import torch.nn as nn
import ... | 25,846 | 38.764615 | 139 | py |
GLIP | GLIP-main/maskrcnn_benchmark/modeling/backbone/swint_v2_vl.py | # --------------------------------------------------------
# Swin Transformer
# modified from https://github.com/SwinTransformer/Swin-Transformer-Object-Detection/blob/master/mmdet/models/backbones/swin_transformer.py
# --------------------------------------------------------
import torch
import torch.nn as nn
import ... | 34,947 | 39.590012 | 139 | py |
GLIP | GLIP-main/maskrcnn_benchmark/modeling/backbone/blocks.py | import torch.nn as nn
from .ops import *
class stem(nn.Module):
num_layer = 1
def __init__(self, conv, inplanes, planes, stride=1, norm_layer=nn.BatchNorm2d):
super(stem, self).__init__()
self.conv1 = conv(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn... | 8,413 | 30.631579 | 105 | py |
GLIP | GLIP-main/maskrcnn_benchmark/modeling/detector/generalized_vl_rcnn.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""
Implements the Generalized VL R-CNN framework
"""
import torch
from torch import nn
import torch.nn.functional as F
from maskrcnn_benchmark.structures.image_list import to_image_list
from maskrcnn_benchmark.structures.bounding_box import BoxL... | 21,715 | 45.501071 | 236 | py |
GLIP | GLIP-main/maskrcnn_benchmark/modeling/detector/generalized_rcnn.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""
Implements the Generalized R-CNN framework
"""
import torch
from torch import nn
from maskrcnn_benchmark.structures.image_list import to_image_list
from ..backbone import build_backbone
from ..rpn import build_rpn
from ..roi_heads import bui... | 4,792 | 37.653226 | 119 | py |
GLIP | GLIP-main/maskrcnn_benchmark/modeling/rpn/inference.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import logging
import torch
from maskrcnn_benchmark.modeling.box_coder import BoxCoder
from maskrcnn_benchmark.structures.bounding_box import BoxList, _onnx_clip_boxes_to_image
from maskrcnn_benchmark.structures.boxlist_ops import cat_boxlist
fro... | 32,376 | 37.045828 | 140 | py |
GLIP | GLIP-main/maskrcnn_benchmark/modeling/rpn/anchor_generator.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import math
import numpy as np
import torch
from torch import nn
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.structures.image_list import ImageList
from maskrcnn_benchmark.structures.boxlist_ops import c... | 15,992 | 36.542254 | 107 | py |
GLIP | GLIP-main/maskrcnn_benchmark/modeling/rpn/modeling_bert.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a cop... | 12,601 | 44.992701 | 137 | py |
GLIP | GLIP-main/maskrcnn_benchmark/modeling/rpn/loss.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""
This file contains specific functions for computing losses on the RPN
file
"""
import torch
from torch import nn
from torch.nn import functional as F
from ..balanced_positive_negative_sampler import BalancedPositiveNegativeSampler
from ..util... | 62,431 | 48.865815 | 166 | py |
GLIP | GLIP-main/maskrcnn_benchmark/modeling/rpn/rpn.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
import torch.nn.functional as F
from torch import nn
from maskrcnn_benchmark.modeling import registry
from maskrcnn_benchmark.modeling.box_coder import BoxCoder
from .loss import make_rpn_loss_evaluator
from .anchor_generator import m... | 6,590 | 37.54386 | 88 | py |
GLIP | GLIP-main/maskrcnn_benchmark/modeling/rpn/atss.py | import math
import torch
import torch.nn.functional as F
from torch import nn
from .inference import make_atss_postprocessor
from .loss import make_atss_loss_evaluator
from maskrcnn_benchmark.structures.boxlist_ops import cat_boxlist
from maskrcnn_benchmark.layers import Scale, DFConv2d, DYReLU, SELayer
from .anchor_... | 8,680 | 36.098291 | 110 | py |
GLIP | GLIP-main/maskrcnn_benchmark/modeling/rpn/transformer.py | import torch
import torch.nn.functional as F
from torch import nn, Tensor
import copy
from typing import Optional, List
def _get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
def _get_activation_fn(activation):
"""Return an activation function given a string"""
if a... | 1,861 | 34.132075 | 79 | py |
GLIP | GLIP-main/maskrcnn_benchmark/modeling/rpn/fcos.py | import math
import torch
import torch.nn.functional as F
from torch import nn
from maskrcnn_benchmark.modeling import registry
from maskrcnn_benchmark.layers import Scale, DFConv2d
from .loss import make_fcos_loss_evaluator
from .anchor_generator import make_center_anchor_generator
from .inference import make_fcos_pos... | 8,482 | 34.793249 | 104 | py |
GLIP | GLIP-main/maskrcnn_benchmark/modeling/rpn/retina.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import math
import torch
import torch.nn.functional as F
from torch import nn
from maskrcnn_benchmark.modeling import registry
from maskrcnn_benchmark.modeling.box_coder import BoxCoder
from .loss import make_focal_loss_evaluator
from .anchor_gene... | 5,485 | 33.942675 | 92 | py |
GLIP | GLIP-main/maskrcnn_benchmark/modeling/rpn/vldyhead.py | import torch
import torch.nn.functional as F
from torch import nn
from collections import defaultdict
from .inference import make_atss_postprocessor
from .loss import make_atss_loss_evaluator
from .anchor_generator import make_anchor_generator_complex
from maskrcnn_benchmark.structures.boxlist_ops import cat_boxlist
... | 46,595 | 43.933462 | 186 | py |
GLIP | GLIP-main/maskrcnn_benchmark/modeling/rpn/dyhead.py | import math
import torch
import torch.nn.functional as F
from torch import nn
from .inference import make_atss_postprocessor
from .loss import make_atss_loss_evaluator
from .anchor_generator import make_anchor_generator_complex
from maskrcnn_benchmark.structures.boxlist_ops import cat_boxlist
from maskrcnn_benchmark.... | 14,965 | 38.592593 | 154 | py |
GLIP | GLIP-main/maskrcnn_benchmark/modeling/roi_heads/__init__.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from .box_head.box_head import build_roi_box_head
from .mask_head.mask_head import build_roi_mask_head
from .keypoint_head.keypoint_head import build_roi_keypoint_head
class CombinedROIHeads(torch.nn.ModuleDict):
"""
Combine... | 3,584 | 41.678571 | 120 | py |
GLIP | GLIP-main/maskrcnn_benchmark/modeling/roi_heads/mask_head/inference.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from maskrcnn_benchmark.structures.bounding_box import BoxList
def convert_mask_grounding_to_od_logits(logits, positive_map_label_to_token, num_classes):
od... | 7,802 | 33.68 | 129 | py |
GLIP | GLIP-main/maskrcnn_benchmark/modeling/roi_heads/mask_head/roi_mask_feature_extractors.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from torch import nn
from torch.nn import functional as F
from .hourglass import Hourglass
from ..box_head.roi_box_feature_extractors import ResNet50Conv5ROIFeatureExtractor
from maskrcnn_benchmark.modeling.poolers import Pooler
from maskrcnn_benc... | 4,026 | 33.127119 | 82 | py |
GLIP | GLIP-main/maskrcnn_benchmark/modeling/roi_heads/mask_head/loss.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from torch.nn import functional as F
from maskrcnn_benchmark.layers import smooth_l1_loss
from maskrcnn_benchmark.modeling.matcher import Matcher
from maskrcnn_benchmark.structures.boxlist_ops import boxlist_iou
from maskrcnn_benchmar... | 7,291 | 39.511111 | 124 | py |
GLIP | GLIP-main/maskrcnn_benchmark/modeling/roi_heads/mask_head/roi_mask_predictors.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from torch import nn
from torch.nn import functional as F
from maskrcnn_benchmark.layers import Conv2d, _NewEmptyTensorOp
from maskrcnn_benchmark.layers import ConvTranspose2d
from ...utils import permute_and_flatten
class MaskRCNNC... | 4,850 | 42.3125 | 111 | py |
GLIP | GLIP-main/maskrcnn_benchmark/modeling/roi_heads/mask_head/mask_head.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from torch import nn
from maskrcnn_benchmark.structures.bounding_box import BoxList
from .roi_mask_feature_extractors import make_roi_mask_feature_extractor
from .roi_mask_predictors import make_roi_mask_predictor
from .inference imp... | 3,402 | 37.235955 | 93 | py |
GLIP | GLIP-main/maskrcnn_benchmark/modeling/roi_heads/mask_head/hourglass.py | from torch import nn
from maskrcnn_benchmark.modeling.make_layers import make_conv3x3
class Residual(nn.Module):
def __init__(self, inp_dim, out_dim, use_gn=False):
super(Residual, self).__init__()
self.relu = nn.ReLU()
# self.bn1 = nn.BatchNorm2d(inp_dim)
self.conv1 = make_conv3x... | 2,156 | 32.184615 | 103 | py |
GLIP | GLIP-main/maskrcnn_benchmark/modeling/roi_heads/box_head/inference.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
import torch.nn.functional as F
from torch import nn
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.structures.boxlist_ops import boxlist_nms
from maskrcnn_benchmark.structures.boxlist_ops impor... | 7,409 | 40.629213 | 103 | py |
GLIP | GLIP-main/maskrcnn_benchmark/modeling/roi_heads/box_head/roi_box_feature_extractors.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from torch import nn
from torch.nn import functional as F
from maskrcnn_benchmark.modeling import registry
from maskrcnn_benchmark.modeling.backbone import resnet
from maskrcnn_benchmark.modeling.poolers import Pooler
from maskrcnn_be... | 7,292 | 35.10396 | 129 | py |
GLIP | GLIP-main/maskrcnn_benchmark/modeling/roi_heads/box_head/box_head.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from torch import nn
from .roi_box_feature_extractors import make_roi_box_feature_extractor
from .roi_box_predictors import make_roi_box_predictor
from .inference import make_roi_box_post_processor
from .loss import make_roi_box_loss_... | 2,917 | 37.394737 | 96 | py |
GLIP | GLIP-main/maskrcnn_benchmark/modeling/roi_heads/box_head/loss.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from torch.nn import functional as F
from maskrcnn_benchmark.layers import smooth_l1_loss
from maskrcnn_benchmark.modeling.box_coder import BoxCoder
from maskrcnn_benchmark.modeling.matcher import Matcher
from maskrcnn_benchmark.struc... | 7,313 | 37.904255 | 116 | py |
GLIP | GLIP-main/maskrcnn_benchmark/modeling/roi_heads/box_head/roi_box_predictors.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from torch import nn
class FastRCNNPredictor(nn.Module):
def __init__(self, config, pretrained=None):
super(FastRCNNPredictor, self).__init__()
stage_index = 4
stage2_relative_factor = 2 ** (stage_index - 1)
r... | 2,066 | 31.809524 | 72 | py |
GLIP | GLIP-main/maskrcnn_benchmark/modeling/roi_heads/keypoint_head/inference.py | import cv2
import numpy as np
import torch
from torch import nn
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.structures.keypoint import PersonKeypoints
class KeypointPostProcessor(nn.Module):
def __init__(self, keypointer=None):
super(KeypointPostProcessor, self)... | 4,422 | 35.553719 | 102 | py |
GLIP | GLIP-main/maskrcnn_benchmark/modeling/roi_heads/keypoint_head/roi_keypoint_feature_extractors.py | from torch import nn
from torch.nn import functional as F
from maskrcnn_benchmark.modeling.poolers import Pooler
from maskrcnn_benchmark.layers import Conv2d
from maskrcnn_benchmark.layers import ConvTranspose2d
class KeypointRCNNFeatureExtractor(nn.Module):
def __init__(self, cfg):
super(KeypointRCNNFe... | 3,820 | 38.802083 | 96 | py |
GLIP | GLIP-main/maskrcnn_benchmark/modeling/roi_heads/keypoint_head/loss.py | import torch
from torch.nn import functional as F
from maskrcnn_benchmark.modeling.matcher import Matcher
from maskrcnn_benchmark.modeling.balanced_positive_negative_sampler import (
BalancedPositiveNegativeSampler,
)
from maskrcnn_benchmark.structures.boxlist_ops import boxlist_iou
from maskrcnn_benchmark.modeli... | 7,102 | 37.814208 | 90 | py |
GLIP | GLIP-main/maskrcnn_benchmark/modeling/roi_heads/keypoint_head/keypoint_head.py | import torch
from .roi_keypoint_feature_extractors import make_roi_keypoint_feature_extractor
from .roi_keypoint_predictors import make_roi_keypoint_predictor
from .inference import make_roi_keypoint_post_processor
from .loss import make_roi_keypoint_loss_evaluator
class ROIKeypointHead(torch.nn.Module):
def __i... | 1,954 | 38.1 | 81 | py |
GLIP | GLIP-main/maskrcnn_benchmark/modeling/roi_heads/keypoint_head/roi_keypoint_predictors.py | from torch import nn
from torch.nn import functional as F
from maskrcnn_benchmark import layers
class KeypointRCNNPredictor(nn.Module):
def __init__(self, cfg):
super(KeypointRCNNPredictor, self).__init__()
input_features = cfg.MODEL.ROI_KEYPOINT_HEAD.CONV_LAYERS[-1]
num_keypoints = cfg.M... | 1,213 | 30.128205 | 79 | py |
GLIP | GLIP-main/maskrcnn_benchmark/modeling/language_backbone/clip_model.py | from collections import OrderedDict
import logging
import os
import torch
from torch import nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from maskrcnn_benchmark.config import try_to_find
from timm.models.layers import DropPath, trunc_normal_
logger = logging.getLogger(__name__)
cl... | 7,704 | 37.525 | 118 | py |
GLIP | GLIP-main/maskrcnn_benchmark/modeling/language_backbone/simple_tokenizer.py | import gzip
import html
import os
from functools import lru_cache
import ftfy
import regex as re
from typing import Union, List
import torch
@lru_cache()
def default_bpe():
return os.path.join(os.path.dirname(os.path.abspath(__file__)), "bpe_simple_vocab_16e6.txt.gz")
@lru_cache()
def bytes_to_unicode():
... | 5,917 | 33.011494 | 120 | py |
GLIP | GLIP-main/maskrcnn_benchmark/modeling/language_backbone/rnn_model.py | from copy import deepcopy
import numpy as np
import torch
from torch import nn
class RNNEnoder(nn.Module):
def __init__(self, cfg):
super(RNNEnoder, self).__init__()
self.cfg = cfg
self.rnn_type = cfg.MODEL.LANGUAGE_BACKBONE.RNN_TYPE
self.variable_length = cfg.MODEL.LANGUAGE_BACKB... | 5,692 | 48.077586 | 117 | py |
GLIP | GLIP-main/maskrcnn_benchmark/modeling/language_backbone/bert_model.py | from copy import deepcopy
import numpy as np
import torch
from torch import nn
# from pytorch_pretrained_bert.modeling import BertModel
from transformers import BertConfig, RobertaConfig, RobertaModel, BertModel
class BertEncoder(nn.Module):
def __init__(self, cfg):
super(BertEncoder, self).__init__()
... | 3,234 | 39.4375 | 112 | py |
GLIP | GLIP-main/maskrcnn_benchmark/modeling/language_backbone/hfpt_tokenizer.py | from typing import Union, List
from transformers import AutoTokenizer
import torch
class HFPTTokenizer(object):
def __init__(self, pt_name=None):
self.pt_name = pt_name
self.added_sep_token = 0
self.added_cls_token = 0
self.enable_add_tokens = False
self.gpt_special_case ... | 3,187 | 30.88 | 91 | py |
GLIP | GLIP-main/maskrcnn_benchmark/modeling/language_backbone/backbone.py | from collections import OrderedDict
import torch
from torch import nn
from maskrcnn_benchmark.modeling import registry
from . import bert_model
from . import rnn_model
from . import clip_model
from . import word_utils
@registry.LANGUAGE_BACKBONES.register("bert-base-uncased")
def build_bert_backbone(cfg):
body =... | 1,344 | 28.23913 | 84 | py |
GLIP | GLIP-main/maskrcnn_benchmark/modeling/language_backbone/word_utils.py | """
Language-related data loading helper functions and class wrappers.
"""
import re
import torch
import codecs
UNK_TOKEN = '<unk>'
PAD_TOKEN = '<pad>'
END_TOKEN = '<eos>'
SENTENCE_SPLIT_REGEX = re.compile(r'(\W+)')
class Dictionary(object):
def __init__(self):
self.word2idx = {}
self.idx2word =... | 3,179 | 30.485149 | 137 | py |
GLIP | GLIP-main/maskrcnn_benchmark/structures/image_list.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from __future__ import division
import torch
class ImageList(object):
"""
Structure that holds a list of images (of possibly
varying sizes) as a single tensor.
This works by padding the images to the same size,
and storing in... | 2,418 | 33.070423 | 87 | py |
GLIP | GLIP-main/maskrcnn_benchmark/structures/segmentation_mask.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
import pycocotools.mask as mask_utils
# transpose
FLIP_LEFT_RIGHT = 0
FLIP_TOP_BOTTOM = 1
class Mask(object):
"""
This class is unfinished and not meant for use yet
It is supposed to contain the mask for an object as
... | 6,965 | 31.4 | 86 | py |
GLIP | GLIP-main/maskrcnn_benchmark/structures/bounding_box.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
# transpose
FLIP_LEFT_RIGHT = 0
FLIP_TOP_BOTTOM = 1
class BoxList(object):
"""
This class represents a set of bounding boxes.
The bounding boxes are represented as a Nx4 Tensor.
In order to uniquely determine the bou... | 12,010 | 36.301242 | 97 | py |
GLIP | GLIP-main/maskrcnn_benchmark/structures/boxlist_ops.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from .bounding_box import BoxList
from maskrcnn_benchmark.layers import nms as _box_nms
from maskrcnn_benchmark.layers import ml_nms as _box_ml_nms
def boxlist_nms(boxlist, nms_thresh, max_proposals=-1, score_field="score"):
""... | 5,580 | 29.167568 | 103 | py |
GLIP | GLIP-main/maskrcnn_benchmark/structures/keypoint.py | import torch
from maskrcnn_benchmark.config import cfg
# transpose
FLIP_LEFT_RIGHT = 0
FLIP_TOP_BOTTOM = 1
class Keypoints(object):
def __init__(self, keypoints, size, mode=None):
# FIXME remove check once we have better integration with device
# in my version this would consistently return a CPU... | 7,344 | 33.646226 | 128 | py |
coco-minitrain | coco-minitrain-master/src/dataloader.py | import sys
import os
import torch
import numpy as np
import csv
import math
from torch.utils.data import Dataset
from pycocotools.coco import COCO
import skimage.io
import skimage.transform
import skimage.color
import skimage
from PIL import Image
class CocoDataset(Dataset):
"""Coco dataset."""
def __init... | 11,533 | 33.023599 | 138 | py |
AdaptiveSupervisedPatchNCE | AdaptiveSupervisedPatchNCE-master/test.py | """General-purpose test script for image-to-image translation.
Once you have trained your model with train.py, you can use this script to test the model.
It will load a saved model from --checkpoints_dir and save the results to --results_dir.
It first creates model and dataset given the option. It will hard-code some... | 3,935 | 54.43662 | 123 | py |
AdaptiveSupervisedPatchNCE | AdaptiveSupervisedPatchNCE-master/evaluate.py | import os
import random
import torch
import numpy as np
from skimage import io
from skimage.metrics import peak_signal_noise_ratio
from skimage.metrics import structural_similarity
from tqdm import tqdm
from torchvision.transforms.functional import to_tensor
from pytorch_fid.fid_score import calculate_activation_stati... | 3,143 | 33.173913 | 93 | py |
AdaptiveSupervisedPatchNCE | AdaptiveSupervisedPatchNCE-master/train.py | import time
import torch
from options.train_options import TrainOptions
from data import create_dataset
from models import create_model
from util.visualizer import Visualizer
if __name__ == '__main__':
opt = TrainOptions().parse() # get training options
dataset = create_dataset(opt) # create a dataset give... | 4,429 | 53.691358 | 186 | py |
AdaptiveSupervisedPatchNCE | AdaptiveSupervisedPatchNCE-master/options/base_options.py | import argparse
import os
from util import util
import torch
import models
import data
class BaseOptions():
"""This class defines options used during both training and test time.
It also implements several helper functions such as parsing, printing, and saving the options.
It also gathers additional opti... | 9,938 | 58.160714 | 298 | py |
AdaptiveSupervisedPatchNCE | AdaptiveSupervisedPatchNCE-master/models/base_model.py | import os
import torch
from collections import OrderedDict
from abc import ABC, abstractmethod
from . import networks
class BaseModel(ABC):
"""This class is an abstract base class (ABC) for models.
To create a subclass, you need to implement the following five functions:
-- <__init__>: ... | 11,223 | 42.335907 | 260 | py |
AdaptiveSupervisedPatchNCE | AdaptiveSupervisedPatchNCE-master/models/patchnce.py | from packaging import version
import torch
from torch import nn
class PatchNCELoss(nn.Module):
def __init__(self, opt):
super().__init__()
self.opt = opt
self.cross_entropy_loss = torch.nn.CrossEntropyLoss(reduction='none')
self.mask_dtype = torch.uint8 if version.parse(torch.__ver... | 2,319 | 40.428571 | 114 | py |
AdaptiveSupervisedPatchNCE | AdaptiveSupervisedPatchNCE-master/models/cut_model.py | import numpy as np
import torch
from .base_model import BaseModel
from . import networks
from .patchnce import PatchNCELoss
import util.util as util
class CUTModel(BaseModel):
""" This class implements CUT and FastCUT model, described in the paper
Contrastive Learning for Unpaired Image-to-Image Translation
... | 10,226 | 46.567442 | 227 | py |
AdaptiveSupervisedPatchNCE | AdaptiveSupervisedPatchNCE-master/models/networks.py | from copy import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
import functools
from torch.optim import lr_scheduler
import numpy as np
###############################################################################
# Helper Functions
################################... | 61,287 | 42.069571 | 187 | py |
AdaptiveSupervisedPatchNCE | AdaptiveSupervisedPatchNCE-master/models/cpt_model.py | import numpy as np
import torch
from models.asp_loss import AdaptiveSupervisedPatchNCELoss
from .base_model import BaseModel
from . import networks
from .patchnce import PatchNCELoss
from .gauss_pyramid import Gauss_Pyramid_Conv
import util.util as util
class CPTModel(BaseModel):
""" Contrastive Paired Translati... | 12,659 | 47.320611 | 227 | py |
AdaptiveSupervisedPatchNCE | AdaptiveSupervisedPatchNCE-master/models/gauss_pyramid.py | import torch
from torch import nn
class Gauss_Pyramid_Conv(nn.Module):
"""
Code borrowed from: https://github.com/csjliang/LPTN
"""
def __init__(self, num_high=3):
super(Gauss_Pyramid_Conv, self).__init__()
self.num_high = num_high
self.kernel = self.gauss_kernel()
def gau... | 1,372 | 30.930233 | 74 | py |
AdaptiveSupervisedPatchNCE | AdaptiveSupervisedPatchNCE-master/models/asp_loss.py | import time
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
class AdaptiveSupervisedPatchNCELoss(nn.Module):
def __init__(self, opt):
super().__init__()
self.opt = opt
self.cross_entropy_loss = torch.nn.CrossEntropyLoss(reduction='none')
self.m... | 3,796 | 37.744898 | 112 | py |
AdaptiveSupervisedPatchNCE | AdaptiveSupervisedPatchNCE-master/util/kid_score.py | #!/usr/bin/env python3
"""Calculates the Kernel Inception Distance (KID) to evalulate GANs
"""
import os
import pathlib
import sys
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
import numpy as np
import torch
from sklearn.metrics.pairwise import polynomial_kernel
from scipy import linalg
from PIL ... | 16,195 | 34.991111 | 118 | py |
AdaptiveSupervisedPatchNCE | AdaptiveSupervisedPatchNCE-master/util/image_pool.py | import random
import torch
class ImagePool():
"""This class implements an image buffer that stores previously generated images.
This buffer enables us to update discriminators using a history of generated images
rather than the ones produced by the latest generators.
"""
def __init__(self, pool_... | 2,226 | 39.490909 | 140 | py |
AdaptiveSupervisedPatchNCE | AdaptiveSupervisedPatchNCE-master/util/inception.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
try:
from torchvision.models.utils import load_state_dict_from_url
except ImportError:
from torch.utils.model_zoo import load_url as load_state_dict_from_url
# Inception weights ported to Pytorch from
# http://download.tenso... | 12,192 | 36.17378 | 140 | py |
AdaptiveSupervisedPatchNCE | AdaptiveSupervisedPatchNCE-master/util/fid.py | """Calculates the Frechet Inception Distance (FID) to evalulate GANs
The FID metric calculates the distance between two distributions of images.
Typically, we have summary statistics (mean & covariance matrix) of one
of these distributions, while the 2nd distribution is given by a GAN.
When run as a stand-alone progr... | 10,639 | 35.944444 | 79 | py |
AdaptiveSupervisedPatchNCE | AdaptiveSupervisedPatchNCE-master/util/util.py | """This module contains simple helper functions """
from __future__ import print_function
import torch
import numpy as np
from PIL import Image
import os
import importlib
import argparse
from argparse import Namespace
import torchvision
import cv2 as cv
def str2bool(v):
if isinstance(v, bool):
return v
... | 7,265 | 31.877828 | 145 | py |
AdaptiveSupervisedPatchNCE | AdaptiveSupervisedPatchNCE-master/util/perceptual.py | # Copyright (C) 2020 NVIDIA Corporation. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, check out LICENSE.md
# Copyright (C) 2020 NVIDIA Corporation. All rights reserved
import torch
import torch.nn.functional as F
import torchvision
fro... | 13,025 | 36.431034 | 105 | py |
AdaptiveSupervisedPatchNCE | AdaptiveSupervisedPatchNCE-master/data/base_dataset.py | """This module implements an abstract base class (ABC) 'BaseDataset' for datasets.
It also includes common transformation functions (e.g., get_transform, __scale_width), which can be later used in subclasses.
"""
import random
import numpy as np
import torch.utils.data as data
from PIL import Image
import torchvision.... | 8,026 | 33.748918 | 153 | py |
AdaptiveSupervisedPatchNCE | AdaptiveSupervisedPatchNCE-master/data/image_folder.py | """A modified image folder class
We modify the official PyTorch image folder (https://github.com/pytorch/vision/blob/master/torchvision/datasets/folder.py)
so that this class can load images from both current directory and its subdirectories.
"""
import torch.utils.data as data
from PIL import Image
import os
import... | 1,941 | 27.985075 | 122 | py |
AdaptiveSupervisedPatchNCE | AdaptiveSupervisedPatchNCE-master/data/aligned_dataset.py | import os.path
import numpy as np
import torch
import json
from data.base_dataset import BaseDataset, get_transform
from data.image_folder import make_dataset
from PIL import Image
import random
import util.util as util
class AlignedDataset(BaseDataset):
"""
This dataset class can load aligned/paired dataset... | 4,135 | 42.083333 | 122 | py |
AdaptiveSupervisedPatchNCE | AdaptiveSupervisedPatchNCE-master/data/__init__.py | """This package includes all the modules related to data loading and preprocessing
To add a custom dataset class called 'dummy', you need to add a file called 'dummy_dataset.py' and define a subclass 'DummyDataset' inherited from BaseDataset.
You need to implement four functions:
-- <__init__>: ... | 3,694 | 36.323232 | 176 | py |
AdaSVRG | AdaSVRG-master/utils.py | from dependencies import *
import hashlib
import pickle
import json
import os
import itertools
import numpy as np
import tqdm
import shutil
from shutil import copyfile
# =======================================
# haven
from haven import haven_jupyter as hj
from haven import haven_results as hr
# from haven import hav... | 6,833 | 34.780105 | 140 | py |
AdaSVRG | AdaSVRG-master/objectives.py | from dependencies import *
def make_closure(loss_fn, prior_prec=1e-2):
'''Computes loss and gradient of the loss w.r.t. w
Parameters:
loss_fn: the loss function to use (logistic loss, hinge loss, squared error, etc)
prior_prec: precision of the Gaussian prior (pass 0 to avoid regul... | 2,434 | 33.785714 | 144 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.