| repo (string, lengths 1–99) | file (string, lengths 13–215) | code (string, lengths 12–59.2M) | file_length (int64, 12–59.2M) | avg_line_length (float64, 3.82–1.48M) | max_line_length (int64, 12–2.51M) | extension_type (string, 1 class) |
|---|---|---|---|---|---|---|
| gluon-cv | gluon-cv-master/gluoncv/data/transforms/block.py | # Licensed to the Apache Software Foundation (ASF) under one<br># or more contributor license agreements. See the NOTICE file<br># distributed with this work for additional information<br># regarding copyright ownership. The ASF licenses this file<br># to you under the Apache License, Version 2.0 (the<br># "License"); you may not u... | 4,927 | 35.503704 | 99 | py |
| gluon-cv | gluon-cv-master/gluoncv/data/transforms/pose.py | # pylint: disable=all<br>"""Pose related transformation functions<br>Adapted from https://github.com/Microsoft/human-pose-estimation.pytorch<br>---------------------------------------------<br>Copyright (c) Microsoft<br>Licensed under the MIT License.<br>Written by Bin Xiao (Bin.Xiao@microsoft.com)<br>------------------------------------... | 20,233 | 31.954397 | 97 | py |
| gluon-cv | gluon-cv-master/gluoncv/data/transforms/presets/yolo.py | """Transforms for YOLO series."""<br># pylint: disable=not-callable<br>from __future__ import absolute_import<br>import numpy as np<br>import mxnet as mx<br>from mxnet import autograd<br>from .. import bbox as tbbox<br>from .. import image as timage<br>from .. import experimental<br>__all__ = ['transform_test', 'load_test', 'YOLO3DefaultTrainTr... | 9,686 | 38.218623 | 99 | py |
| gluon-cv | gluon-cv-master/gluoncv/data/transforms/presets/ssd.py | """Transforms described in https://arxiv.org/abs/1512.02325."""<br># pylint: disable=not-callable<br>from __future__ import absolute_import<br>import numpy as np<br>import mxnet as mx<br>from .. import bbox as tbbox<br>from .. import image as timage<br>from .. import experimental<br>from ....utils import try_import_dali<br>dali = try_import_da... | 13,324 | 36.32493 | 107 | py |
| gluon-cv | gluon-cv-master/gluoncv/data/transforms/presets/simple_pose.py | # pylint: disable=all<br>"""Transforms for simple pose estimation."""<br>from __future__ import absolute_import<br>import random<br>import numpy as np<br>import mxnet as mx<br>from ..image import random_flip as random_flip_image<br>from ..pose import flip_joints_3d, get_affine_transform, affine_transform<br>from ....utils.filesystem import t... | 6,078 | 35.620482 | 120 | py |
| gluon-cv | gluon-cv-master/gluoncv/data/transforms/presets/rcnn.py | """Transforms for RCNN series."""<br># pylint: disable=not-callable<br>from __future__ import absolute_import<br>import copy<br>from random import randint<br>import mxnet as mx<br>from .. import bbox as tbbox<br>from .. import image as timage<br>from .. import mask as tmask<br>__all__ = ['transform_test', 'load_test',<br>'FasterRCNND... | 22,475 | 43.157171 | 100 | py |
| gluon-cv | gluon-cv-master/gluoncv/data/transforms/presets/segmentation.py | """Transforms for Segmentation models."""<br>from __future__ import absolute_import<br>from mxnet.gluon.data.vision import transforms<br>def test_transform(img, ctx):<br>transform_fn = transforms.Compose([<br>transforms.ToTensor(),<br>transforms.Normalize([.485, .456, .406], [.229, .224, .225])<br>])<br>img = tra... | 398 | 27.5 | 68 | py |
| gluon-cv | gluon-cv-master/gluoncv/data/transforms/presets/center_net.py | """Transforms described in https://arxiv.org/abs/1904.07850."""<br># pylint: disable=too-many-function-args,not-callable<br>from __future__ import absolute_import<br>import numpy as np<br>import mxnet as mx<br>from .. import bbox as tbbox<br>from .. import image as timage<br>from .. import experimental<br>from ....utils.filesystem import try_... | 10,774 | 38.181818 | 100 | py |
| gluon-cv | gluon-cv-master/gluoncv/data/transforms/presets/alpha_pose.py | """Transforms for alpha pose estimation."""<br>from __future__ import absolute_import<br>import random<br>import numpy as np<br>import mxnet as mx<br>from ..pose import random_sample_bbox, count_visible, random_crop_bbox<br>from ..pose import drawGaussian, transformBox, cv_cropBox, cv_rotate, detector_to_alpha_pose<br>class AlphaPoseDef... | 7,233 | 33.61244 | 95 | py |
| gluon-cv | gluon-cv-master/gluoncv/data/transforms/presets/imagenet.py | """Transforms for ImageNet series."""<br>from __future__ import absolute_import<br>import mxnet as mx<br>from mxnet.gluon.data.vision import transforms<br>__all__ = ['transform_eval']<br>def transform_eval(imgs, resize_short=256, crop_size=224,<br>mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)):<br>"""A uti... | 1,615 | 31.32 | 87 | py |
| gluon-cv | gluon-cv-master/gluoncv/data/transforms/experimental/image.py | """Experimental image transformations."""<br>from __future__ import division<br>import random<br>import numpy as np<br>import mxnet as mx<br>from mxnet import nd<br>def random_color_distort(src, brightness_delta=32, contrast_low=0.5, contrast_high=1.5,<br>saturation_low=0.5, saturation_high=1.5, hue_delta=18):<br>... | 5,628 | 31.350575 | 87 | py |
| gluon-cv | gluon-cv-master/gluoncv/data/kinetics400/classification.py | # pylint: disable=line-too-long,too-many-lines,missing-docstring<br>"""Kinetics400 video action recognition dataset.<br>Code adapted from https://github.com/open-mmlab/mmaction and<br>https://github.com/bryanyzhu/two-stream-pytorch"""<br>import os<br>from ..video_custom import VideoClsCustom<br>__all__ = ['Kinetics400']<br>class Kinetics... | 14,608 | 80.161111 | 152 | py |
| gluon-cv | gluon-cv-master/gluoncv/data/kitti/kitti_dataset.py | """KITTI Dataset. (KITTI Raw, KITTI Odom, KITTI Depth)<br>Vision meets Robotics: The KITTI Dataset, IJRR 2013<br>http://www.cvlibs.net/datasets/kitti/raw_data.php<br>Code partially borrowed from<br>https://github.com/nianticlabs/monodepth2/blob/master/datasets/kitti_dataset.py<br>"""<br># pylint: disable=abstract-method, unused-import<br>#... | 5,897 | 34.107143 | 96 | py |
| gluon-cv | gluon-cv-master/gluoncv/data/kitti/mono_dataset.py | """Monocular Depth Estimation Dataset.<br>Digging into Self-Supervised Monocular Depth Prediction, ICCV 2019<br>https://arxiv.org/abs/1806.01260<br>Code partially borrowed from<br>https://github.com/nianticlabs/monodepth2/blob/master/datasets/mono_dataset.py<br>"""<br>import random<br>import copy<br>import numpy as np<br>from PIL import Image #... | 7,002 | 33.840796 | 98 | py |
| gluon-cv | gluon-cv-master/gluoncv/data/kitti/kitti_utils.py | """Tools for KITTI Dataset<br>Code partially borrowed from<br>https://github.com/nianticlabs/monodepth2/blob/master/kitti_utils.py<br>"""<br>from __future__ import absolute_import, division, print_function<br>import os<br>from collections import Counter<br>import numpy as np<br>import mxnet as mx<br>def load_velodyne_points(filename):<br>""... | 4,418 | 33.255814 | 97 | py |
| gluon-cv | gluon-cv-master/docs/conf.py | # -*- coding: utf-8 -*-<br>#<br># Configuration file for the Sphinx documentation builder.<br>#<br># This file does only contain a selection of the most common options. For a<br># full list see the documentation:<br># http://www.sphinx-doc.org/en/stable/config<br># -- Path setup ------------------------------------------------------------... | 9,561 | 30.35082 | 79 | py |
| gluon-cv | gluon-cv-master/docs/tutorials/depth/demo_monodepth2.py | """01. Predict depth from a single image with pre-trained Monodepth2 models<br>===========================================================================<br>This is a quick demo of using GluonCV Monodepth2 model for KITTI on real-world images.<br>Please follow the `installation guide <../../index.html#installation>`__<br>to inst... | 2,952 | 35.012195 | 108 | py |
| gluon-cv | gluon-cv-master/docs/tutorials/depth/videos_monodepth2.py | """02. Predict depth from an image sequence or a video with pre-trained Monodepth2 models<br>===========================================================================<br>This article will demonstrate how to estimate depth from your image sequence or video stream.<br>Please follow the `installation guide <../../index.html#ins... | 7,248 | 33.519048 | 221 | py |
| gluon-cv | gluon-cv-master/docs/tutorials/depth/test_monodepth2_posenet.py | """04. Testing PoseNet from image sequences with pre-trained Monodepth2 Pose models<br>===========================================================================<br>This is a quick demo of using the GluonCV Monodepth2 model for KITTI on real-world images.<br>Please follow the `installation guide <../../index.html#installation... | 6,612 | 39.820988 | 197 | py |
| gluon-cv | gluon-cv-master/docs/tutorials/depth/train_monodepth2.py | """03. Monodepth2 training on KITTI dataset<br>==================================================<br>This is a tutorial of training MonoDepth2 on the KITTI dataset using Gluon CV toolkit.<br>The readers should have basic knowledge of deep learning and should be familiar with Gluon API.<br>New users may first go through `A 60-minu... | 32,910 | 43.354447 | 260 | py |
| gluon-cv | gluon-cv-master/docs/tutorials/instance/train_mask_rcnn_coco.py | """2. Train Mask RCNN end-to-end on MS COCO<br>===========================================<br>This tutorial goes through the steps for training a Mask R-CNN [He17]_ instance segmentation model<br>provided by GluonCV.<br>Mask R-CNN is an extension to the Faster R-CNN [Ren15]_ object detection model.<br>As such, this tutorial is also... | 17,072 | 45.394022 | 180 | py |
| gluon-cv | gluon-cv-master/docs/tutorials/classification/dive_deep_imagenet.py | """5. Train Your Own Model on ImageNet<br>==========================================<br>``ImageNet`` is the most well-known dataset for image classification.<br>Since it was published, most of the research that advances the state-of-the-art<br>of image classification was based on this dataset.<br>Although there are a lot of availab... | 11,330 | 33.972222 | 132 | py |
| gluon-cv | gluon-cv-master/docs/tutorials/classification/transfer_learning_minc.py | #!/usr/bin/env python<br># -*- coding: UTF-8 -*-<br>"""4. Transfer Learning with Your Own Image Dataset<br>=======================================================<br>Dataset size is a big factor in the performance of deep learning models.<br>``ImageNet`` has over one million labeled images, but<br>we often don't have so much labeled d... | 10,327 | 33.085809 | 142 | py |
| gluon-cv | gluon-cv-master/docs/tutorials/classification/demo_cifar10.py | """1. Getting Started with Pre-trained Model on CIFAR10<br>=======================================================<br>`CIFAR10 <https://www.cs.toronto.edu/~kriz/cifar.html>`__ is a<br>dataset of tiny (32x32) images with labels, collected by Alex Krizhevsky,<br>Vinod Nair, and Geoffrey Hinton. It is widely used as benchmark in<br>com... | 4,475 | 33.697674 | 119 | py |
| gluon-cv | gluon-cv-master/docs/tutorials/classification/dive_deep_cifar10.py | """2. Dive Deep into Training with CIFAR10<br>==============================================<br>Hope you enjoyed playing with our demo script.<br>Now, you may be wandering: how exactly was the model trained?<br>In this tutorial, we will focus on answering this question.<br>Prerequisites<br>-------------<br>We assume readers have a basic... | 12,499 | 36.313433 | 130 | py |
| gluon-cv | gluon-cv-master/docs/tutorials/segmentation/train_fcn.py | """4. Train FCN on Pascal VOC Dataset<br>=====================================<br>This is a semantic segmentation tutorial using Gluon CV toolkit, a step-by-step example.<br>The readers should have basic knowledge of deep learning and should be familiar with Gluon API.<br>New users may first go through `A 60-minute Gluon Crash Co... | 10,825 | 40.799228 | 202 | py |
| gluon-cv | gluon-cv-master/docs/tutorials/segmentation/voc_sota.py | """6. Reproducing SoTA on Pascal VOC Dataset<br>=========================================<br>This is a semantic segmentation tutorial for reproducing state-of-the-art results<br>on Pascal VOC dataset using Gluon CV toolkit.<br>Start Training Now<br>~~~~~~~~~~~~~~~~~~<br>.. hint::<br>Feel free to skip the tutorial because the traini... | 9,316 | 42.537383 | 224 | py |
| gluon-cv | gluon-cv-master/docs/tutorials/segmentation/demo_deeplab.py | """3. Test with DeepLabV3 Pre-trained Models<br>======================================<br>This is a quick demo of using GluonCV DeepLabV3 model on ADE20K dataset.<br>Please follow the `installation guide <../../index.html#installation>`__<br>to install MXNet and GluonCV if not yet.<br>"""<br>import mxnet as mx<br>from mxnet import image<br>f... | 2,120 | 32.140625 | 78 | py |
| gluon-cv | gluon-cv-master/docs/tutorials/segmentation/train_psp.py | """5. Train PSPNet on ADE20K Dataset<br>=================================<br>This is a tutorial of training PSPNet on ADE20K dataset using Gluon Vison.<br>The readers should have basic knowledge of deep learning and should be familiar with Gluon API.<br>New users may first go through `A 60-minute Gluon Crash Course <http://gluon-... | 9,071 | 38.964758 | 158 | py |
| gluon-cv | gluon-cv-master/docs/tutorials/segmentation/demo_icnet.py | """7. Test with ICNet Pre-trained Models for Multi-Human Parsing<br>======================================<br>This is a quick demo of using GluonCV ICNet model for multi-human parsing on real-world images.<br>Please follow the `installation guide <../../index.html#installation>`__<br>to install MXNet and GluonCV if not yet.<br>"""<br>i... | 2,179 | 32.030303 | 104 | py |
| gluon-cv | gluon-cv-master/docs/tutorials/segmentation/demo_psp.py | """2. Test with PSPNet Pre-trained Models<br>======================================<br>This is a quick demo of using GluonCV PSPNet model on ADE20K dataset.<br>Please follow the `installation guide <../../index.html#installation>`__<br>to install MXNet and GluonCV if not yet.<br>"""<br>import mxnet as mx<br>from mxnet import image<br>from mx... | 2,109 | 32.492063 | 78 | py |
| gluon-cv | gluon-cv-master/docs/tutorials/segmentation/demo_fcn.py | """1. Getting Started with FCN Pre-trained Models<br>==============================================<br>This is a quick demo of using GluonCV FCN model on PASCAL VOC dataset.<br>Please follow the `installation guide <../../index.html#installation>`__<br>to install MXNet and GluonCV if not yet.<br>"""<br>import mxnet as mx<br>from mxnet imp... | 2,954 | 34.178571 | 106 | py |
| gluon-cv | gluon-cv-master/docs/tutorials/deployment/int8_inference.py | """3. Inference with Quantized Models<br>=====================================<br>This is a tutorial which illustrates how to use quantized GluonCV<br>models for inference on Intel Xeon Processors to gain higher performance.<br>The following example requires ``GluonCV>=0.5`` and ``MXNet-mkl>=1.6.0b20191010``. Please follow `our ... | 17,366 | 74.508696 | 801 | py |
| gluon-cv | gluon-cv-master/docs/tutorials/action_recognition/demo_i3d_kinetics400.py | """3. Getting Started with Pre-trained I3D Models on Kinetcis400<br>================================================================<br>`Kinetics400 <https://deepmind.com/research/open-source/kinetics>`_ is an action recognition dataset<br>of realistic action videos, collected from YouTube. With 306,245 short trimmed videos<br>f... | 4,407 | 44.443299 | 124 | py |
| gluon-cv | gluon-cv-master/docs/tutorials/action_recognition/demo_slowfast_kinetics400.py | """5. Getting Started with Pre-trained SlowFast Models on Kinetcis400<br>=====================================================================<br>`Kinetics400 <https://deepmind.com/research/open-source/kinetics>`_ is an action recognition dataset<br>of realistic action videos, collected from YouTube. With 306,245 short trimme... | 4,653 | 44.627451 | 108 | py |
| gluon-cv | gluon-cv-master/docs/tutorials/action_recognition/dive_deep_slowfast_kinetics400.py | """6. Dive Deep into Training SlowFast mdoels on Kinetcis400<br>============================================================<br>This is a video action recognition tutorial using Gluon CV toolkit, a step-by-step example.<br>The readers should have basic knowledge of deep learning and should be familiar with Gluon API.<br>New users... | 8,271 | 37.119816 | 140 | py |
| gluon-cv | gluon-cv-master/docs/tutorials/action_recognition/demo_tsn_ucf101.py | """1. Getting Started with Pre-trained TSN Models on UCF101<br>===========================================================<br>`UCF101 <https://www.crcv.ucf.edu/data/UCF101.php>`_ is an action recognition dataset<br>of realistic action videos, collected from YouTube. With 13,320 short trimmed videos<br>from 101 action categories,... | 6,509 | 36.848837 | 103 | py |
| gluon-cv | gluon-cv-master/docs/tutorials/action_recognition/dive_deep_tsn_ucf101.py | """2. Dive Deep into Training TSN mdoels on UCF101<br>==================================================<br>This is a video action recognition tutorial using Gluon CV toolkit, a step-by-step example.<br>The readers should have basic knowledge of deep learning and should be familiar with Gluon API.<br>New users may first go throug... | 9,107 | 38.428571 | 186 | py |
| gluon-cv | gluon-cv-master/docs/tutorials/action_recognition/dive_deep_i3d_kinetics400.py | """4. Dive Deep into Training I3D mdoels on Kinetcis400<br>=======================================================<br>This is a video action recognition tutorial using Gluon CV toolkit, a step-by-step example.<br>The readers should have basic knowledge of deep learning and should be familiar with Gluon API.<br>New users may first... | 8,172 | 36.837963 | 140 | py |
| gluon-cv | gluon-cv-master/docs/tutorials/action_recognition/finetune_custom.py | """7. Fine-tuning SOTA video models on your own dataset<br>=======================================================<br>This is a video action recognition tutorial using Gluon CV toolkit, a step-by-step example.<br>The readers should have basic knowledge of deep learning and should be familiar with Gluon API.<br>New users may first... | 9,565 | 38.858333 | 169 | py |
| gluon-cv | gluon-cv-master/docs/tutorials/distributed/distributed_slowfast.py | """1. Distributed training of deep video models<br>================================================<br>Training deep neural networks on videos is very time consuming. For example, training a state-of-the-art SlowFast network [Feichtenhofer18]_<br>on Kinetics400 dataset (with 240K 10-seconds short videos) using a server with 8 ... | 10,674 | 56.392473 | 157 | py |
| gluon-cv | gluon-cv-master/docs/tutorials/datasets/pascal_voc.py | """Prepare PASCAL VOC datasets<br>==============================<br>`Pascal VOC <http://host.robots.ox.ac.uk/pascal/VOC/>`_ is a collection of<br>datasets for object detection. The most commonly combination for<br>benchmarking is using *2007 trainval* and *2012 trainval* for training and *2007<br>test* for validation. This tutorial ... | 5,160 | 48.625 | 174 | py |
| gluon-cv | gluon-cv-master/docs/tutorials/datasets/somethingsomethingv2.py | """Prepare the 20BN-something-something Dataset V2<br>==================================================<br>`Something-something-v2 <https://20bn.com/datasets/something-something>`_ is an action recognition dataset<br>of realistic action videos, collected from YouTube. With 220,847 short trimmed videos<br>from 174 action categor... | 3,919 | 47.395062 | 139 | py |
| gluon-cv | gluon-cv-master/docs/tutorials/datasets/mhp_v1.py | """Prepare Multi-Human Parsing V1 dataset<br>=========================================<br>`Multi-Human Parsing V1 (MHP-v1) <https://github.com/ZhaoJ9014/Multi-Human-Parsing/>`_ is<br>a human-centric dataset for multi-human parsing task. It contains<br>five thousands images annotated with 18 categories.<br>This tutorial helps you to ... | 1,891 | 37.612245 | 114 | py |
| gluon-cv | gluon-cv-master/docs/tutorials/datasets/ucf101.py | """Prepare the UCF101 dataset<br>=============================<br>`UCF101 <https://www.crcv.ucf.edu/data/UCF101.php>`_ is an action recognition dataset<br>of realistic action videos, collected from YouTube. With 13,320 short trimmed videos<br>from 101 action categories, it is one of the most widely used dataset in the research<br>c... | 6,886 | 44.013072 | 120 | py |
| gluon-cv | gluon-cv-master/docs/tutorials/datasets/recordio.py | """Prepare your dataset in ImageRecord format<br>============================<br>Raw images are natural data format for computer vision tasks.<br>However, when loading data from image files for training, disk IO might be a bottleneck.<br>For instance, when training a ResNet50 model with ImageNet on an AWS p3.16xlarge instance,<br>T... | 4,489 | 33.538462 | 120 | py |
| gluon-cv | gluon-cv-master/docs/tutorials/datasets/hmdb51.py | """Prepare the HMDB51 Dataset<br>=============================<br>`HMDB51 <http://serre-lab.clps.brown.edu/resource/hmdb-a-large-human-motion-database/>`_ is an action recognition dataset,<br>collected from various sources, mostly from movies, and a small proportion from public databases such as the Prelinger archive,<br>YouTube... | 7,177 | 45.012821 | 134 | py |
| gluon-cv | gluon-cv-master/docs/tutorials/datasets/imagenet.py | """Prepare the ImageNet dataset<br>============================<br>The `ImageNet <http://www.image-net.org/>`_ project contains millions of images<br>and thousands of objects for image classification. It is widely used in the<br>research community for benchmarking state-of-the-art models.<br>.. image:: ../../_static/imagenet_banner... | 3,012 | 30.385417 | 107 | py |
| gluon-cv | gluon-cv-master/docs/tutorials/datasets/mscoco.py | """Prepare COCO datasets<br>==============================<br>`COCO <http://cocodataset.org/#home>`_ is a large-scale object detection, segmentation, and captioning datasetself.<br>This tutorial will walk through the steps of preparing this dataset for GluonCV.<br>.. image:: http://cocodataset.org/images/coco-logo.png<br>.. hint::... | 4,740 | 49.43617 | 174 | py |
| gluon-cv | gluon-cv-master/docs/tutorials/datasets/detection_custom.py | """Prepare custom datasets for object detection<br>===============================================<br>With GluonCV, we have already provided built-in support for widely used public datasets with zero<br>effort, e.g. :ref:`sphx_glr_build_examples_datasets_pascal_voc.py` and :ref:`sphx_glr_build_examples_datasets_mscoco.py`.<br>Ho... | 11,347 | 41.501873 | 255 | py |
| gluon-cv | gluon-cv-master/docs/tutorials/datasets/kinetics400.py | """Prepare the Kinetics400 dataset<br>==================================<br>`Kinetics400 <https://deepmind.com/research/open-source/kinetics>`_ is an action recognition dataset<br>of realistic action videos, collected from YouTube. With 306,245 short trimmed videos<br>from 400 action categories, it is one of the largest and most... | 11,064 | 48.618834 | 199 | py |
| gluon-cv | gluon-cv-master/docs/tutorials/pose/cam_demo.py | """3. Estimate pose from your webcam<br>====================================<br>This article will demonstrate how to estimate people's pose from your webcam video stream.<br>First, import the necessary modules.<br>.. code-block:: python<br>from __future__ import division<br>import argparse, time, logging, os, math, tqdm, cv... | 4,526 | 28.980132 | 111 | py |
| gluon-cv | gluon-cv-master/docs/tutorials/pose/dive_deep_simple_pose.py | """4. Dive deep into Training a Simple Pose Model on COCO Keypoints<br>===================================================================<br>In this tutorial, we show you how to train a pose estimation model [1]_ on the COCO dataset.<br>First let's import some necessary modules.<br>"""<br>from __future__ import division<br>import t... | 7,970 | 36.599057 | 120 | py |
| gluon-cv | gluon-cv-master/docs/tutorials/detection/finetune_detection.py | """08. Finetune a pretrained detection model<br>============================================<br>Fine-tuning is commonly used approach to transfer previously trained model to a new dataset.<br>It is especially useful if the targeting new dataset is relatively small.<br>Finetuning from pre-trained models can help reduce the risk o... | 8,429 | 47.448276 | 135 | py |
| gluon-cv | gluon-cv-master/docs/tutorials/detection/train_ssd_voc.py | """04. Train SSD on Pascal VOC dataset<br>======================================<br>This tutorial goes through the basic building blocks of object detection<br>provided by GluonCV.<br>Specifically, we show how to build a state-of-the-art Single Shot Multibox<br>Detection [Liu16]_ model by stacking GluonCV components.<br>This is also a ... | 11,937 | 38.529801 | 167 | py |
| gluon-cv | gluon-cv-master/docs/tutorials/detection/train_ssd_advanced.py | """05. Deep dive into SSD training: 3 tips to boost performance<br>===============================================================<br>In the previous tutorial :ref:`sphx_glr_build_examples_detection_train_ssd_voc.py`,<br>we briefly went through the basic APIs that help building the training pipeline of SSD.<br>In this article, w... | 9,653 | 47.27 | 111 | py |
| gluon-cv | gluon-cv-master/docs/tutorials/detection/demo_jetson.py | """12. Run an object detection model on NVIDIA Jetson module<br>==========================================================<br>This tutorial shows how to install MXNet v1.6 (with Jetson support) and GluonCV on a Jetson module and deploy a pre-trained GluonCV model for object detection.<br>What's in this tutorial?<br>-------------... | 5,200 | 40.608 | 287 | py |
| gluon-cv | gluon-cv-master/docs/tutorials/detection/demo_webcam.py | """09. Run an object detection model on your webcam<br>==================================================<br>This article will shows how to play with pre-trained object detection models by running<br>them directly on your webcam video stream.<br>.. note::<br>- This tutorial has only been tested in a MacOS environment<br>- Pyt... | 3,217 | 25.595041 | 151 | py |
| gluon-cv | gluon-cv-master/docs/tutorials/detection/train_yolo_v3.py | """07. Train YOLOv3 on PASCAL VOC<br>================================<br>This tutorial goes through the basic steps of training a YOLOv3 object detection model<br>provided by GluonCV.<br>Specifically, we show how to build a state-of-the-art YOLOv3 model by stacking GluonCV components.<br>.. hint::<br>You can skip the rest of th... | 10,529 | 41.979592 | 133 | py |
| gluon-cv | gluon-cv-master/docs/tutorials/detection/train_faster_rcnn_voc.py | """06. Train Faster-RCNN end-to-end on PASCAL VOC<br>================================================<br>This tutorial goes through the basic steps of training a Faster-RCNN [Ren15]_ object detection model<br>provided by GluonCV.<br>Specifically, we show how to build a state-of-the-art Faster-RCNN model by stacking GluonCV compo... | 15,476 | 44.254386 | 180 | py |
| gluon-cv | gluon-cv-master/docs/tutorials/detection/demo_center_net.py | """11. Predict with pre-trained CenterNet models<br>================================================<br>This article shows how to play with pre-trained CenterNet models with only a few<br>lines of code.<br>First let's import some necessary libraries:<br>"""<br>from gluoncv import model_zoo, data, utils<br>from matplotlib import pyplot a... | 2,774 | 40.41791 | 83 | py |
| gluon-cv | gluon-cv-master/docs/tutorials/detection/demo_yolo.py | """03. Predict with pre-trained YOLO models<br>==========================================<br>This article shows how to play with pre-trained YOLO models with only a few<br>lines of code.<br>First let's import some necessary libraries:<br>"""<br>from gluoncv import model_zoo, data, utils<br>from matplotlib import pyplot as plt<br>#########... | 2,706 | 39.402985 | 83 | py |
| gluon-cv | gluon-cv-master/docs/tutorials/tracking/train_siamrpn.py | """02. Train SiamRPN on COCO、VID、DET、Youtube_bb<br>==================================================<br>This is a Single Obejct Tracking tutorial using Gluon CV toolkit, a step-by-step example.<br>The readers should have basic knowledge of deep learning and should be familiar with Gluon API.<br>New users may first go through `A ... | 7,383 | 39.571429 | 166 | py |
| gluon-cv | gluon-cv-master/docs/tutorials_torch/action_recognition/demo_i3d_kinetics400.py | """1. Getting Started with Pre-trained I3D Models on Kinetcis400<br>================================================================<br>`Kinetics400 <https://deepmind.com/research/open-source/kinetics>`_ is an action recognition dataset<br>of realistic action videos, collected from YouTube. With 306,245 short trimmed videos<br>f... | 4,354 | 42.118812 | 124 | py |
| gluon-cv | gluon-cv-master/docs/tutorials_torch/action_recognition/extract_feat.py | """3. Extracting video features from pre-trained models<br>=======================================================<br>Feature extraction is a very useful tool when you don't have large annotated dataset or don't have the<br>computing resources to train a model from scratch for your use case. It's also useful to visualize what ... | 3,814 | 55.940299 | 171 | py |
| gluon-cv | gluon-cv-master/docs/tutorials_torch/action_recognition/ddp_pytorch.py | """5. DistributedDataParallel (DDP) Framework<br>=======================================================<br>Training deep neural networks on videos is very time consuming.<br>For example, training a state-of-the-art SlowFast network on Kinetics400 dataset (with 240K 10-seconds short videos)<br>using a server with 8 V100 GPUs take... | 7,141 | 60.042735 | 146 | py |
| DRIVE | DRIVE-master/main_reinforce.py | import argparse, math, os<br>import numpy as np<br>import random<br>import yaml<br>from easydict import EasyDict<br>from src.enviroment import DashCamEnv<br>from RLlib.REINFORCE.reinforce import REINFORCE<br>import torch<br>from torch.utils.data import DataLoader<br>from torchvision import transforms<br>from torch.autograd import Variable<br>from sr... | 14,192 | 46.627517 | 184 | py |
| DRIVE | DRIVE-master/main_test_RLsaliency.py | import os<br>import torch<br>import numpy as np<br>from tqdm import tqdm<br>from src.enviroment import DashCamEnv<br>from RLlib.SAC.sac import SAC<br>from main_sac import parse_configs, set_deterministic<br>from torch.utils.data import DataLoader<br>from torchvision import transforms<br>from src.DADA2KS import DADA2KS<br>from src.data_transform imp... | 8,216 | 46.224138 | 157 | py |
| DRIVE | DRIVE-master/main_saliency.py | import os<br>import torch<br>from sklearn.utils import shuffle<br>from src.saliency.mlnet import MLNet, ModMSELoss<br>from src.DADALoader import DADALoader<br>import time, argparse<br>from torch.utils.data import DataLoader<br>import torchvision.transforms as transforms<br>from torchvision.io import write_video<br>from src.data_transform import... | 12,974 | 45.339286 | 147 | py |
| DRIVE | DRIVE-master/plot_saliency.py | import os, cv2<br>import argparse<br>import numpy as np<br>import matplotlib.pyplot as plt<br>from src.saliency.mlnet import MLNet<br>import torch<br>from torchvision import transforms<br>from src.data_transform import ProcessImages, ProcessFixations<br># from src.TorchFovea import TorchFovea<br>def minmax_norm(salmap):<br>"""Normalize the sa... | 3,724 | 39.053763 | 124 | py |
| DRIVE | DRIVE-master/main_visualize.py | import os, cv2<br>import argparse<br>import numpy as np<br>import matplotlib.pyplot as plt<br>from src.saliency.mlnet import MLNet<br>import torch<br>from torchvision import transforms<br>from src.data_transform import ProcessImages, ProcessFixations<br>from src.TorchFovea import TorchFovea<br>def read_frames_from_videos(root_path, vid_name, s... | 13,510 | 45.112628 | 162 | py |
| DRIVE | DRIVE-master/main_sac.py | import argparse, os<br>import torch<br>import numpy as np<br>import itertools<br>import datetime<br>import random<br>import yaml<br>from easydict import EasyDict<br>import time<br>import warnings<br>warnings.filterwarnings("ignore", message=r"Passing", category=FutureWarning)<br>from torch.utils.tensorboard import SummaryWriter<br>from torch.utils.data ... | 16,511 | 46.585014 | 184 | py |
| DRIVE | DRIVE-master/src/DADALoader.py | import os<br>import numpy as np<br>import cv2<br>import torch<br>from torch.utils.data import Dataset<br>from torchvision import transforms<br>class DADALoader(Dataset):<br>def __init__(self, root_path, phase, interval=1, max_frames=-1,<br>transforms={'image':None, 'salmap': None, 'fixpt': None},<br>... | 17,329 | 44.485564 | 147 | py |
| DRIVE | DRIVE-master/src/DADA2KS.py | import os<br>import numpy as np<br>import cv2<br>import torch<br>from torch.utils.data import Dataset<br>from torchvision import transforms<br>class DADA2KS(Dataset):<br>def __init__(self, root_path, phase, interval=1, transforms={'image':None, 'salmap': None, 'fixpt': None},<br>use_salmap=True, use_fixation=True... | 9,642 | 47.457286 | 140 | py |
| DRIVE | DRIVE-master/src/TorchFovea.py | import torch<br>import math<br>import torch.nn.functional as F<br>from kornia import PyrDown, PyrUp<br>class TorchFovea(torch.nn.Module):<br>def __init__(self, imgsize, sigma, level=5, factor=2.0, device=torch.device('cuda')):<br>super(TorchFovea).__init__()<br>assert len(imgsize) == 2, "Invalid image size!"<br>se... | 5,020 | 38.849206 | 132 | py |
| DRIVE | DRIVE-master/src/data_transform.py | """ This file is modified from:<br>https://raw.githubusercontent.com/piergiaj/pytorch-i3d/master/videotransforms.py<br>"""<br>import numpy as np<br>import cv2<br>import numbers<br>import random<br>import torch<br>def scales_to_point(scales, image_size, input_size):<br>"""Transform the predicted scaling factor ranging from -1 to 1<br>into ... | 6,505 | 35.757062 | 119 | py |
| DRIVE | DRIVE-master/src/enviroment.py | from gym import spaces, core<br>import torch<br>import torch.nn.functional as F<br>from src.saliency.mlnet import MLNet<br>from src.saliency.tasednet import TASED_v2<br>from src.TorchFovea import TorchFovea<br>from src.data_transform import scales_to_point, norm_fix<br>from metrics.losses import fixation_loss<br>import numpy as np<br>import os, ... | 11,048 | 46.017021 | 151 | py |
| DRIVE | DRIVE-master/src/saliency/mlnet.py | import torch<br>import torch.nn as nn<br>import torch.nn.functional as F<br>from torchvision import models, transforms<br>import math<br>class ResNet_FPN(torch.nn.Module):<br>def __init__(self, n_layers=50, preTrained=False):<br>super(ResNet_FPN, self).__init__()<br>if n_layers == 50:<br>self.net = models.resnet5... | 6,950 | 39.412791 | 157 | py |
| DRIVE | DRIVE-master/src/saliency/tasednet.py | import torch<br>from torch import nn<br>class TASED_v2(nn.Module):<br>def __init__(self, input_shape):<br>super(TASED_v2, self).__init__()<br>self.input_shape = input_shape<br>self.output_shape = [int(input_shape[0] / 8), int(input_shape[1] / 8)]<br>self.base1 = nn.Sequential(<br>SepConv3d(3, 6... | 15,285 | 36.374083 | 162 | py |
| DRIVE | DRIVE-master/metrics/losses.py | import torch<br>def exp_loss(pred, target, time, toa):<br>'''<br>:param pred:<br>:param target: onehot codings for binary classification<br>:param time:<br>:param toa:<br>:return:<br>'''<br>pred = torch.cat([(1.0 - pred).unsqueeze(1), pred.unsqueeze(1)], dim=1)<br># positive example (exp_loss)<br>target_cls = t... | 1,856 | 38.510638 | 105 | py |
| DRIVE | DRIVE-master/RLlib/REINFORCE/reinforce.py | # import sys<br>import math<br>import os<br>import torch.__config__<br>import torch.nn.functional as F<br>import torch.optim as optim<br>import torch.nn.utils as utils<br>from torch.autograd import Variable<br>from .agents import Policy, ValueEstimator<br>from src.data_transform import scales_to_point, norm_fix<br>from metrics.losses import exp_l... | 6,863 | 42.443038 | 153 | py |
| DRIVE | DRIVE-master/RLlib/REINFORCE/agents.py | import torch<br>import torch.nn as nn<br>import torch.nn.functional as F<br>LOG_SIG_MAX = 2<br>LOG_SIG_MIN = -20<br># Initialize Policy weights<br>def weights_init_(m):<br>if isinstance(m, nn.Linear):<br>torch.nn.init.xavier_uniform_(m.weight, gain=1)<br>torch.nn.init.constant_(m.bias, 0)<br>if isinstance(m, nn.LSTMCell):... | 1,927 | 31.133333 | 82 | py |
| DRIVE | DRIVE-master/RLlib/SAC/sac.py | import os<br>import torch<br>import torch.nn.functional as F<br>from torch.optim import Adam<br>from torch.nn.utils import clip_grad_norm_<br>from .utils import soft_update, hard_update<br>from .agents import AccidentPolicy, FixationPolicy, QNetwork, StateDecoder<br>from src.data_transform import scales_to_point, norm_fix<br>from metrics.loss... | 14,902 | 48.184818 | 154 | py |
| DRIVE | DRIVE-master/RLlib/SAC/agents.py | import torch<br>import torch.nn as nn<br>import torch.nn.functional as F<br>from torch.distributions import Normal<br>LOG_SIG_MAX = 2<br>LOG_SIG_MIN = -20<br>epsilon = 1e-6<br># Initialize Policy weights<br>def weights_init_(m):<br>if isinstance(m, nn.Linear):<br>torch.nn.init.xavier_uniform_(m.weight, gain=1)<br>torch.nn.init.co... | 10,045 | 36.485075 | 137 | py |
| DRIVE | DRIVE-master/RLlib/SAC/utils.py | import math<br>import torch<br>def create_log_gaussian(mean, log_std, t):<br>quadratic = -((0.5 * (t - mean) / (log_std.exp())).pow(2))<br>l = mean.shape<br>log_z = log_std<br>z = l[-1] * math.log(2 * math.pi)<br>log_p = quadratic.sum(dim=-1) - log_z.sum(dim=-1) - 0.5 * z<br>return log_p<br>def logsumexp(inputs, dim=Non... | 965 | 32.310345 | 83 | py |
| DRIVE | DRIVE-master/RLlib/SAC/replay_buffer.py | import numpy as np<br>import random<br>from pynvml import *<br>import torch<br>class ReplayMemory:<br>def __init__(self, capacity):<br>self.capacity = capacity<br>self.buffer = []<br>self.position = 0<br>def push(self, state, action, reward, next_state, rnn_state, labels, done):<br>state_cpu = state.cpu().n... | 5,446 | 45.555556 | 145 | py |
| MHCNN | MHCNN-main/MHCNN_syndataset/main_train_MHCNN_color.py | import os.path<br>import math<br>import argparse<br>import time<br>import random<br>import numpy as np<br>from collections import OrderedDict<br>import logging<br>import torch<br>from torch.utils.data import DataLoader<br>from utils import utils_logger<br>from utils import utils_image as util<br>from utils import utils_option as option<br>from data.selec... | 8,347 | 35.614035 | 125 | py |
| MHCNN | MHCNN-main/MHCNN_syndataset/main_test_MHCNN_color.py | import os.path<br>import logging<br>import numpy as np<br>from datetime import datetime<br>from collections import OrderedDict<br># from scipy.io import loadmat<br>import torch<br>from utils import utils_logger<br>from utils import utils_model<br>from utils import utils_image as util<br>def main():<br># ---------------------------------------... | 5,751 | 36.594771 | 146 | py |
| MHCNN | MHCNN-main/MHCNN_syndataset/main_train_MHCNN.py | import os.path<br>import math<br>import argparse<br>import time<br>import random<br>import numpy as np<br>from collections import OrderedDict<br>import logging<br>import torch<br>from torch.utils.data import DataLoader<br>from utils import utils_logger<br>from utils import utils_image as util<br>from utils import utils_option as option<br>from data.selec... | 8,341 | 35.587719 | 125 | py |
| MHCNN | MHCNN-main/MHCNN_syndataset/main_test_MHCNN.py | import os.path<br>import logging<br>import numpy as np<br>from datetime import datetime<br>from collections import OrderedDict<br># from scipy.io import loadmat<br>import torch<br>from utils import utils_logger<br>from utils import utils_model<br>from utils import utils_image as util<br>def main():<br># ---------------------------------------... | 5,744 | 36.54902 | 146 | py |
| MHCNN | MHCNN-main/MHCNN_syndataset/models/model_base.py | import os<br>import torch<br>import torch.nn as nn<br>from utils.utils_bnorm import merge_bn, tidy_sequential<br>class ModelBase():<br>def __init__(self, opt):<br>self.opt = opt # opt<br>self.save_dir = opt['path']['models'] # save models<br>self.device = torch.device('cuda' if opt['gpu_i... | 5,372 | 29.355932 | 148 | py |
| MHCNN | MHCNN-main/MHCNN_syndataset/models/network_mhcnn_color.py | import torch<br>import torch.nn as nn<br>import models.basicblock as B<br>from models.transformer import Multi_Scale_Attention5<br># If it still doesn't work well, remove those two attention steps and change nc to 128<br>class D_Block(nn.Module):<br>def __init__(self, channel_in, channel_out):<br>super(D_Block, self).__init__()<br>self.conv_1 = nn.Conv2d(in_channels=channel_in, ... | 2,723 | 31.047059 | 126 | py |
| MHCNN | MHCNN-main/MHCNN_syndataset/models/model_plain_mhcnn.py | from collections import OrderedDict<br>import torch<br>import torch.nn as nn<br>from torch.optim import lr_scheduler<br>from torch.optim import Adam<br>from torch.nn.parallel import DataParallel # , DistributedDataParallel<br>from models.select_network import define_G<br>from models.model_base import ModelBase<br>from models.loss_ssim impor... | 9,242 | 37.194215 | 146 | py |
| MHCNN | MHCNN-main/MHCNN_syndataset/models/select_network.py | import functools<br>import torch<br>from torch.nn import init<br>"""<br># --------------------------------------------<br># select the network of G, D and F<br># --------------------------------------------<br>"""<br># --------------------------------------------<br># Generator, netG, G<br># --------------------------------------------<br>def defi... | 4,357 | 32.267176 | 113 | py |
| MHCNN | MHCNN-main/MHCNN_syndataset/models/model_plain.py | from collections import OrderedDict<br>import torch<br>import torch.nn as nn<br>from torch.optim import lr_scheduler<br>from torch.optim import Adam<br>from torch.nn.parallel import DataParallel # , DistributedDataParallel<br>from models.select_network import define_G<br>from models.model_base import ModelBase<br>from models.loss_ssim impor... | 9,116 | 37.631356 | 146 | py |
| MHCNN | MHCNN-main/MHCNN_syndataset/models/loss.py | import torch<br>import torch.nn as nn<br># --------------------------------------------<br># GAN loss: gan, ragan<br># --------------------------------------------<br>class GANLoss(nn.Module):<br>def __init__(self, gan_type, real_label_val=1.0, fake_label_val=0.0):<br>super(GANLoss, self).__init__()<br>self.gan_type = ga... | 3,823 | 35.075472 | 97 | py |
| MHCNN | MHCNN-main/MHCNN_syndataset/models/basicblock.py | from collections import OrderedDict<br>import torch<br>import torch.nn as nn<br>import torch.nn.functional as F<br>from utils.batchrenorm import BatchRenorm2d<br>def sequential(*args):<br>"""Advanced nn.Sequential.<br>Args:<br>nn.Sequential, nn.Module<br>Returns:<br>nn.Sequential<br>"""<br>if len(args) == 1:<br>... | 48,670 | 40.528157 | 169 | py |
| MHCNN | MHCNN-main/MHCNN_syndataset/models/transformer.py | """<br>Adapted from https://github.com/lukemelas/simple-bert<br>"""<br>import numpy as np<br>from torch import nn<br>from torch import Tensor<br>from torch.nn import functional as F<br>import torch<br>from einops import rearrange, repeat<br>from einops.layers.torch import Rearrange<br>import models.basicblock as B<br># Unfold; a plain linear layer won't work because the image size varies, so use 1x... | 14,009 | 35.579634 | 80 | py |
| MHCNN | MHCNN-main/MHCNN_syndataset/models/loss_ssim.py | import torch<br>import torch.nn.functional as F<br>from torch.autograd import Variable<br>import numpy as np<br>from math import exp<br>"""<br># ============================================<br># SSIM loss<br># https://github.com/Po-Hsun-Su/pytorch-ssim<br># ============================================<br>"""<br>def gaussian(window_size, sigma):<br>... | 3,708 | 30.974138 | 104 | py |
| MHCNN | MHCNN-main/MHCNN_syndataset/models/network_mhcnn.py | import torch<br>import torch.nn as nn<br>import models.basicblock as B<br>from models.transformer import Multi_Scale_Attention5<br>class D_Block(nn.Module):<br>def __init__(self, channel_in, channel_out):<br>super(D_Block, self).__init__()<br>self.conv_1 = nn.Conv2d(in_channels=channel_in, out_channels=int(channel_in /... | 2,608 | 32.448718 | 126 | py |
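
Each row pairs a file's raw source (`code`) with precomputed length statistics (`file_length`, `avg_line_length`, `max_line_length`), so simple quality filters reduce to column comparisons instead of re-parsing the source. Below is a minimal sketch of loading and filtering a dataset with this schema via the Hugging Face `datasets` library; the identifier `user/python-code-corpus` is a hypothetical placeholder, since the card does not name the dataset.

```python
# Minimal sketch, assuming a Hugging Face dataset with the schema shown above.
# "user/python-code-corpus" is a hypothetical placeholder identifier.
from datasets import load_dataset

ds = load_dataset("user/python-code-corpus", split="train")

# The precomputed statistics make length-based filtering a cheap column test:
# keep only Python files whose longest line fits a 120-character budget.
filtered = ds.filter(
    lambda row: row["extension_type"] == "py" and row["max_line_length"] <= 120
)

# Inspect a few surviving rows without materializing the whole split.
for row in filtered.select(range(3)):
    print(row["repo"], row["file"], row["file_length"], sep=" | ")
```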