| repo (string, 7–90 chars) | file_url (string, 81–315 chars) | file_path (string, 4–228 chars) | content (string, 0–32.8k chars) | language (string, 1 class) | license (string, 7 classes) | commit_sha (string, 40 chars) | retrieved_at (date, 2026-01-04 14:38:15 – 2026-01-05 02:33:18) | truncated (bool, 2 classes) |
|---|---|---|---|---|---|---|---|---|
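The schema above is one flat record per source file. As a rough illustration only — the preview does not name the dataset, so the repository id `org/code-files` below is a placeholder, not the real identifier — such records could be read with the Hugging Face `datasets` library roughly like this:

```python
# Minimal sketch of iterating a dataset with the columns shown above.
# "org/code-files" is a hypothetical dataset id; streaming avoids a full download.
from datasets import load_dataset

ds = load_dataset("org/code-files", split="train", streaming=True)

for row in ds:
    # Each record carries the columns from the header: repo, file_url, file_path,
    # content, language, license, commit_sha, retrieved_at, truncated.
    print(row["repo"], row["file_path"], row["license"], row["truncated"])
    if not row["truncated"]:
        source = row["content"]  # full file text when the row is not truncated
    break
```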
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/zamba/models/__init__.py | zamba/models/__init__.py | from zamba.models.efficientnet_models import TimeDistributedEfficientNet # noqa: F401
from zamba.models.slowfast_models import SlowFast # noqa: F401
| python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/zamba/models/densepose/densepose_manager.py | zamba/models/densepose/densepose_manager.py | import json
import logging
from pathlib import Path
from typing import Optional
import cv2
try:
from densepose import add_densepose_config
from densepose.data.utils import get_class_to_mesh_name_mapping
from densepose.modeling.build import build_densepose_embedder
from densepose.structures.cse import ... | python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/zamba/models/densepose/config.py | zamba/models/densepose/config.py | from enum import Enum
import os
from pathlib import Path
from typing import Optional
from loguru import logger
import pandas as pd
from pydantic.class_validators import root_validator, validator
from tqdm import tqdm
from zamba.data.video import VideoLoaderConfig
from zamba.models.config import (
ZambaBaseModel,
... | python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/zamba/models/densepose/__init__.py | zamba/models/densepose/__init__.py | from .densepose_manager import DensePoseManager, MODELS # noqa
from .config import DensePoseConfig, DensePoseOutputEnum # noqa
| python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/zamba/models/depth_estimation/depth_manager.py | zamba/models/depth_estimation/depth_manager.py | from loguru import logger
import numpy as np
import pandas as pd
from pathlib import Path
import torch
import torch.utils
import torch.utils.data
from torchvision import transforms
from torchvision.transforms import Resize
from tqdm import tqdm
from zamba.data.video import load_video_frames
from zamba.models.utils imp... | python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/zamba/models/depth_estimation/config.py | zamba/models/depth_estimation/config.py | import os
from pathlib import Path
from loguru import logger
import pandas as pd
from pydantic import DirectoryPath, FilePath, validator, root_validator
from typing import Optional, Union
from zamba.models.config import (
ZambaBaseModel,
check_files_exist_and_load,
get_video_filepaths,
GPUS_AVAILABLE,... | python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/zamba/models/depth_estimation/__init__.py | zamba/models/depth_estimation/__init__.py | from .depth_manager import DepthDataset, DepthEstimationManager, MODELS # noqa
from .config import DepthEstimationConfig # noqa
| python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/zamba/pytorch/dataloaders.py | zamba/pytorch/dataloaders.py | from typing import Optional, Tuple
import warnings
from loguru import logger
import numpy as np
import pandas as pd
from pandas_path import path # noqa: F401
import torch
import torchvision.datasets.video_utils
from torchvision.datasets.vision import VisionDataset
import torchvision.transforms.transforms
from zamba... | python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/zamba/pytorch/finetuning.py | zamba/pytorch/finetuning.py | from typing import Optional
import pytorch_lightning as pl
def multiplier_factory(rate: float):
"""Returns a function that returns a constant value for use in computing a constant learning
rate multiplier.
Args:
rate (float): Constant multiplier.
"""
def multiplier(*args, **kwargs):
... | python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/zamba/pytorch/utils.py | zamba/pytorch/utils.py | from typing import Optional, Tuple
import torch
def build_multilayer_perceptron(
input_size: int,
hidden_layer_sizes: Optional[Tuple[int]],
output_size: int,
activation: Optional[torch.nn.Module] = torch.nn.ReLU,
dropout: Optional[float] = None,
output_dropout: Optional[float] = None,
out... | python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/zamba/pytorch/__init__.py | zamba/pytorch/__init__.py | python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false | |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/zamba/pytorch/transforms.py | zamba/pytorch/transforms.py | import itertools
from typing import Optional, Tuple
import torch
from torchvision import transforms
from torchvision.transforms import Normalize
class ConvertTHWCtoCTHW(torch.nn.Module):
"""Convert tensor from (0:T, 1:H, 2:W, 3:C) to (3:C, 0:T, 1:H, 2:W)"""
def forward(self, vid: torch.Tensor) -> torch.Tens... | python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/zamba/pytorch/layers.py | zamba/pytorch/layers.py | import torch
def _stack_tups(tuples, stack_dim=1):
"""Stack tuple of tensors along `stack_dim`
NOTE: vendored (with minor adaptations) from fastai:
https://github.com/fastai/fastai/blob/4b0785254fdece1a44859956b6e54eedb167a97e/fastai/layers.py#L505-L507
Updates:
- use `range` rather than fa... | python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/zamba/data/metadata.py | zamba/data/metadata.py | import itertools
from uuid import uuid4
from loguru import logger
import numpy as np
import pandas as pd
from typing import Dict, Optional, Union
def roundrobin(*iterables):
"roundrobin('ABC', 'D', 'EF') --> A D E B F C"
# From https://docs.python.org/3/library/itertools.html#recipes
# Recipe credited to... | python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/zamba/data/__init__.py | zamba/data/__init__.py | python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false | |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/zamba/data/video.py | zamba/data/video.py | from fractions import Fraction
from functools import reduce
import hashlib
import json
from math import floor
import os
from pathlib import Path
import subprocess
from shutil import rmtree
import tempfile
from typing import Optional, Union, List
import cv2
from cloudpathlib import S3Path, AnyPath
import ffmpeg
from lo... | python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/zamba/images/result.py | zamba/images/result.py | from typing import Any
import pandas as pd
from PIL import Image
from pydantic import BaseModel
class ImageDetectionResult(BaseModel):
category: str
conf: float
bbox: list[
float
] # MegaDetector bbox is relative measures from top left [x1, y1, width, height]
classifications: list[list]
... | python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/zamba/images/config.py | zamba/images/config.py | import json
import os
from enum import StrEnum
from pathlib import Path
from typing import Any, Dict, Optional, Union
import appdirs
import pandas as pd
import torch
from loguru import logger
from pydantic import DirectoryPath, FilePath, root_validator, validator
from tqdm.contrib.concurrent import process_map
from z... | python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/zamba/images/__init__.py | zamba/images/__init__.py | from .classifier import ImageClassifierModule # noqa: F401
| python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/zamba/images/evaluate.py | zamba/images/evaluate.py | import io
from typing import Dict, List, Optional
import numpy as np
from PIL import Image
from sklearn.metrics import (
accuracy_score,
confusion_matrix,
f1_score,
precision_score,
recall_score,
top_k_accuracy_score,
)
from zamba.metrics import compute_species_specific_metrics
class Classifi... | python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/zamba/images/manager.py | zamba/images/manager.py | from collections.abc import Iterable
from datetime import datetime, timezone
import json
import os
from pathlib import Path
import random
from functools import partial
import sys
import git
import mlflow
import pandas as pd
import pytorch_lightning as pl
import torch
from loguru import logger
from megadetector.detecti... | python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/zamba/images/classifier.py | zamba/images/classifier.py | import os
from typing import Any, List, Optional, Union
import numpy as np
import pytorch_lightning as pl
import timm
import torch
from torch.optim import AdamW
from torch.optim.lr_scheduler import LRScheduler
import torch.nn as nn
import torch.utils
from zamba.images.evaluate import ClassificationEvaluator
from zamba... | python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/zamba/images/data.py | zamba/images/data.py | import copy
import os
from itertools import repeat
from pathlib import Path
from typing import Optional
import pandas as pd
import pytorch_lightning as pl
from loguru import logger
from megadetector.detection import run_detector
from PIL import Image
from torch.utils.data import DataLoader, Dataset
from torchvision im... | python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/zamba/images/bbox.py | zamba/images/bbox.py | from enum import StrEnum
from pathlib import Path
from typing import Iterable, Tuple, Union
from loguru import logger
import pandas as pd
from PIL import Image
class BboxInputFormat(StrEnum):
COCO = "coco"
MEGADETECTOR = "megadetector"
class BboxLayout(StrEnum):
XYXY = "xyxy"
XYWH = "xywh"
def bb... | python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/zamba/images/dataset/dataset.py | zamba/images/dataset/dataset.py | import json
import math
import os
from functools import partial
from pathlib import Path
from typing import Any, Dict, List, Optional
import boto3
import pandas as pd
import typer
import yaml
from botocore import UNSIGNED
from botocore.config import Config
from botocore.exceptions import ClientError
from loguru import... | python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/zamba/images/dataset/__init__.py | zamba/images/dataset/__init__.py | python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false | |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/zamba/object_detection/__init__.py | zamba/object_detection/__init__.py | from zamba.object_detection.yolox.yolox_model import YoloXArgs, YoloXExp, YoloXModel
__all__ = ["YoloXArgs", "YoloXExp", "YoloXModel"]
| python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/zamba/object_detection/yolox/yolox_model.py | zamba/object_detection/yolox/yolox_model.py | from pathlib import Path
import os
import yaml
from pydantic import BaseModel
from yolox.exp import Exp
import yolox.utils as utils
class YoloXArgs(BaseModel):
"""Args for commandline training of yolox from:
train: https://github.com/Megvii-BaseDetection/YOLOX/blob/68408b4083f818f50aacc29881e6f97cd19fcef2/t... | python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/zamba/object_detection/yolox/__init__.py | zamba/object_detection/yolox/__init__.py | python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false | |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/zamba/object_detection/yolox/megadetector_lite_yolox.py | zamba/object_detection/yolox/megadetector_lite_yolox.py | from enum import Enum
import os
from pathlib import Path
from typing import List, Optional, Tuple, Union
import numpy as np
import pandas as pd
from PIL import Image, ImageOps
from pydantic import BaseModel
import torch
from tqdm import tqdm
from yolox.utils.boxes import postprocess
from zamba.object_detection import... | python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/tests/test_metrics.py | tests/test_metrics.py | import numpy as np
import pytest
from zamba.metrics import compute_species_specific_metrics
@pytest.fixture
def y_true():
y_true = np.array([1, 1, 1, 1, 0, 0, 0, 0])
# Mutually exclusive labels
return np.c_[1 - y_true, y_true]
@pytest.fixture
def y_pred():
y_pred = np.array([0, 1, 1, 1, 1, 1, 1, 0])... | python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/tests/test_depth.py | tests/test_depth.py | import appdirs
import numpy as np
import pandas as pd
from pathlib import Path
from pydantic import ValidationError
import pytest
from zamba.models.config import GPUS_AVAILABLE
from zamba.models.depth_estimation import DepthEstimationManager, DepthEstimationConfig
from conftest import ASSETS_DIR, TEST_VIDEOS_DIR
@p... | python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/tests/test_metadata.py | tests/test_metadata.py | import itertools
from string import ascii_letters
import numpy as np
import pandas as pd
from zamba.data.metadata import create_site_specific_splits, one_hot_to_labels
def test_site_specific_splits():
group = pd.Series(
list(
itertools.chain.from_iterable(
[element] * n for e... | python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/tests/test_densepose.py | tests/test_densepose.py | import os
import pytest
from pydantic import ValidationError
from zamba.data.video import VideoLoaderConfig
from zamba.models.densepose import DensePoseManager, DensePoseConfig
from zamba.models.densepose.densepose_manager import MODELS
from conftest import ASSETS_DIR
@pytest.fixture
def chimp_video_path():
r... | python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/tests/test_config.py | tests/test_config.py | from pathlib import Path
import pytest
import appdirs
import numpy as np
import pandas as pd
from pydantic import ValidationError
from zamba.models.config import (
EarlyStoppingConfig,
ModelConfig,
PredictConfig,
SchedulerConfig,
TrainConfig,
)
from conftest import ASSETS_DIR, TEST_VIDEOS_DIR
@... | python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/tests/test_datamodule.py | tests/test_datamodule.py | import itertools
from zamba.pytorch.dataloaders import get_datasets
from zamba.pytorch_lightning.video_modules import ZambaVideoDataModule
def test_get_datasets_train_metadata(train_metadata):
train_dataset, val_dataset, test_dataset, predict_dataset = get_datasets(
train_metadata=train_metadata,
)
... | python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/tests/test_model_manager.py | tests/test_model_manager.py | import json
from pathlib import Path
from botocore.exceptions import ClientError
import pytest
import torch
import yaml
from zamba.models.utils import download_weights, get_model_checkpoint_filename
from zamba.models.model_manager import train_model
from conftest import DummyTrainConfig, TEST_VIDEOS_DIR, labels_n_cl... | python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/tests/test_image_file_handling.py | tests/test_image_file_handling.py | """Tests for filepath handling in the image code path"""
import json
from pathlib import Path
import shutil
import pandas as pd
import pytest
import torch
from typer.testing import CliRunner
from zamba.image_cli import app as image_app
from zamba.images.data import ImageClassificationDataModule
from conftest import ... | python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/tests/test_instantiate_model.py | tests/test_instantiate_model.py | import pandas as pd
import pytest
import torch
from zamba.models.config import SchedulerConfig, TrainConfig
from zamba.models.model_manager import instantiate_model
from zamba.models.utils import get_model_species
from conftest import DummyZambaVideoClassificationLightningModule
def test_scheduler_ignored_for_predi... | python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/tests/test_load_video_frames.py | tests/test_load_video_frames.py | import os
from pathlib import Path
import pytest
import shutil
import subprocess
from typing import Any, Callable, Dict, Optional, Union
from unittest import mock
import numpy as np
from PIL import Image
from pydantic import BaseModel, ValidationError
from zamba.data.video import (
load_video_frames,
Megadete... | python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/tests/test_cli.py | tests/test_cli.py | import os
from pathlib import Path
import shutil
from typer.testing import CliRunner
import pandas as pd
import pytest
from pytest_mock import mocker # noqa: F401
from zamba.cli import app
from conftest import ASSETS_DIR, TEST_VIDEOS_DIR
runner = CliRunner()
@pytest.fixture
def minimum_valid_train(labels_absolut... | python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/tests/conftest.py | tests/conftest.py | import logging
import os
import random
import string
from typing import Optional, Union
from loguru import logger
import pandas as pd
from pathlib import Path
import pytest
from _pytest.logging import caplog as _caplog # noqa: F401
import torch
from zamba.data.video import VideoLoaderConfig
from zamba.models.config ... | python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/tests/test_transforms.py | tests/test_transforms.py | import torch
from zamba.pytorch.transforms import PadDimensions
def test_pad_dimensions():
# do not change size of None dimensions
pad = PadDimensions((None, 2))
x = torch.randn(3, 1)
padded_x = pad(x)
assert padded_x.shape == torch.Size([3, 2])
assert (pad(x)[:, 1:] == x).all()
# pad a f... | python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/tests/test_filter_frames.py | tests/test_filter_frames.py | import numpy as np
import pytest
from zamba.object_detection.yolox.megadetector_lite_yolox import MegadetectorLiteYoloXConfig
n_frames = 100
rng = np.random.RandomState(68891)
@pytest.fixture
def frames():
# 20 6x8 RGB frames where the upper-right hand pixel is the frame index
frames = rng.randint(0, 255, s... | python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/tests/test_megadetector_lite_yolox.py | tests/test_megadetector_lite_yolox.py | import json
import numpy as np
from PIL import Image
import pytest
import torch
from zamba.object_detection import YoloXModel, YoloXExp, YoloXArgs
from zamba.object_detection.yolox.megadetector_lite_yolox import (
MegadetectorLiteYoloX,
MegadetectorLiteYoloXConfig,
)
from conftest import ASSETS_DIR
@pytest... | python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/tests/test_zamba_video_classification_lightning_module.py | tests/test_zamba_video_classification_lightning_module.py | import pytest
from zamba.models.slowfast_models import SlowFast
from zamba.models.efficientnet_models import TimeDistributedEfficientNet
from conftest import DummyZambaVideoClassificationLightningModule
@pytest.mark.parametrize("model_class", (SlowFast, TimeDistributedEfficientNet))
def test_save_and_load(model_cla... | python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/tests/test_images.py | tests/test_images.py | import json
import logging
import numpy as np
import pandas as pd
import pytest
from PIL import Image
from zamba.images.bbox import BboxInputFormat, bbox_json_to_df, BboxLayout
from zamba.images.classifier import ImageClassifierModule
from zamba.images.config import ImageClassificationTrainingConfig
from zamba.images... | python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/tests/test_npy_cache.py | tests/test_npy_cache.py | from pathlib import Path
import yaml
from zamba.data.video import (
VideoLoaderConfig,
npy_cache,
get_cached_array_path,
load_video_frames,
)
config_yaml = """
cache_dir: local_data/cache
crop_bottom_pixels: 50
early_bias: false
ensure_total_frames: true
evenly_sample_total_frames:... | python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/docs/generate_api_reference.py | docs/generate_api_reference.py | """Helper script for generating mkdocstrings pages in `docs/docs/api-reference`. Meant
to be run manually once in a while with output committed to repo."""
from pathlib import Path
from textwrap import dedent
from yaml import safe_load
DOCS_DIR = Path(__file__).parent
def generate_file(item: dict):
key = next... | python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
ripexz/python-tkinter-minesweeper | https://github.com/ripexz/python-tkinter-minesweeper/blob/fc281a07ffdac8975ddaa7ed49a027c563c5f7a1/minesweeper.py | minesweeper.py | # Python Version 2.7.3
# File: minesweeper.py
from tkinter import *
from tkinter import messagebox as tkMessageBox
from collections import deque
import random
import platform
import time
from datetime import time, date, datetime
SIZE_X = 10
SIZE_Y = 10
STATE_DEFAULT = 0
STATE_CLICKED = 1
STATE_FLAGGED = 2
BTN_CLICK... | python | MIT | fc281a07ffdac8975ddaa7ed49a027c563c5f7a1 | 2026-01-05T07:12:51.967728Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/train.py | train.py | #!/usr/bin/env python3
# Scene Text Recognition Model Hub
# Copyright 2022 Darwin Bautista
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
... | python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/hubconf.py | hubconf.py | from strhub.models.utils import create_model
dependencies = ['torch', 'pytorch_lightning', 'timm']
def parseq_tiny(pretrained: bool = False, decode_ar: bool = True, refine_iters: int = 1, **kwargs):
"""
PARSeq tiny model (img_size=128x32, patch_size=8x4, d_model=192)
@param pretrained: (bool) Use pretra... | python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/tune.py | tune.py | #!/usr/bin/env python3
# Scene Text Recognition Model Hub
# Copyright 2022 Darwin Bautista
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
... | python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/bench.py | bench.py | #!/usr/bin/env python3
# Scene Text Recognition Model Hub
# Copyright 2022 Darwin Bautista
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
... | python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/test.py | test.py | #!/usr/bin/env python3
# Scene Text Recognition Model Hub
# Copyright 2022 Darwin Bautista
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
... | python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/read.py | read.py | #!/usr/bin/env python3
# Scene Text Recognition Model Hub
# Copyright 2022 Darwin Bautista
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
... | python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/dist_utils.py | strhub/dist_utils.py | # coding=utf-8
import os
import logging
import torch.distributed as dist
from pytorch_lightning.utilities import rank_zero_only
def is_dist_avail_and_initialized():
if not dist.is_available():
return False
if not dist.is_initialized():
return False
return True
def get_world_size():
i... | python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/__init__.py | strhub/__init__.py | python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false | |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/clip/model.py | strhub/clip/model.py | from collections import OrderedDict
from typing import Tuple, Union
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1):
super().__init__()
# all conv layers have strid... | python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/clip/simple_tokenizer.py | strhub/clip/simple_tokenizer.py | import gzip
import html
import os
from functools import lru_cache
import ftfy
import regex as re
@lru_cache()
def default_bpe():
return os.path.join(os.path.dirname(os.path.abspath(__file__)), "bpe_simple_vocab_16e6.txt.gz")
@lru_cache()
def bytes_to_unicode():
"""
Returns list of utf-8 byte and a corr... | python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/clip/__init__.py | strhub/clip/__init__.py | from .clip import *
| python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/clip/clip.py | strhub/clip/clip.py | import hashlib
import os
import urllib
import warnings
from typing import Any, Union, List
from pkg_resources import packaging
import torch
from PIL import Image
from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize
from tqdm import tqdm
from .model import build_model
from .simple_tokeni... | python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/modules.py | strhub/models/modules.py | r"""Shared modules used by CRNN and TRBA"""
from torch import nn
class BidirectionalLSTM(nn.Module):
"""Ref: https://github.com/clovaai/deep-text-recognition-benchmark/blob/master/modules/sequence_modeling.py"""
def __init__(self, input_size, hidden_size, output_size):
super().__init__()
self... | python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/post_process.py | strhub/models/post_process.py | # coding = utf-8
import torch
from strhub.clip import clip
@torch.no_grad()
def clip_post_process(clip_model, image, probs, charset_adapter, char_tokenizer,
K=10, K2=5, num_samples=50, prompt=None, alpha=0.3):
"""using CLIP to do post-refinement
Args:
clip_model: the clip model... | python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/utils.py | strhub/models/utils.py | from pathlib import PurePath
from typing import Sequence
import torch
from torch import nn
import yaml
from torchvision.ops.misc import FrozenBatchNorm2d
class InvalidModelError(RuntimeError):
"""Exception raised for any model-related error (creation, loading)"""
_WEIGHTS_URL = {
'parseq-tiny': 'https://g... | python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/__init__.py | strhub/models/__init__.py | python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false | |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/base.py | strhub/models/base.py | # Scene Text Recognition Model Hub
# Copyright 2022 Darwin Bautista
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by ... | python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/vl_str/perm_test.py | strhub/models/vl_str/perm_test.py | # coding=utf-8
import torch
def generate_attn_masks(perm):
"""Generate attention masks given a sequence permutation (includes pos. for bos and eos tokens)
:param perm: the permutation sequence. i = 0 is always the BOS
:return: lookahead attention masks
"""
sz = perm.shape[0]
mask = torch.zeros... | python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/vl_str/system.py | strhub/models/vl_str/system.py | # coding=utf-8
import os
import math
import numpy as np
from itertools import permutations
from typing import Sequence, Any, Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from torch.optim.lr_scheduler import OneCycleLR
from pytorch_lightning.utilities import rank_... | python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/vl_str/modules.py | strhub/models/vl_str/modules.py | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under t... | python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/vl_str/loss.py | strhub/models/vl_str/loss.py | """
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import torch
import torch.distributed.nn
from torch import distributed as dist, nn as nn
from torch... | python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/vl_str/__init__.py | strhub/models/vl_str/__init__.py | # coding=utf-8 | python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/str_adapter/system.py | strhub/models/str_adapter/system.py | # CLIP Adapters for STR
import os
import math
import warnings
from functools import partial
from itertools import permutations
from typing import Sequence, Any, Optional
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from torch.optim.lr_scheduler import O... | python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/str_adapter/modules.py | strhub/models/str_adapter/modules.py | # coding=utf-8
import os
import copy
import math
from functools import partial
from collections import OrderedDict
from typing import Optional, Sequence, Callable, Tuple
from timm.models.helpers import named_apply
import torch
from torch import nn as nn, Tensor
from torch.nn import functional as F
from torch.nn.functi... | python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/str_adapter/__init__.py | strhub/models/str_adapter/__init__.py | # coding=utf-8 | python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/abinet/model_alignment.py | strhub/models/abinet/model_alignment.py | import torch
import torch.nn as nn
from .model import Model
class BaseAlignment(Model):
def __init__(self, dataset_max_length, null_label, num_classes, d_model=512, loss_weight=1.0):
super().__init__(dataset_max_length, null_label)
self.loss_weight = loss_weight
self.w_att = nn.Linear(2 *... | python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/abinet/backbone.py | strhub/models/abinet/backbone.py | import torch.nn as nn
from torch.nn import TransformerEncoderLayer, TransformerEncoder
from .resnet import resnet45
from .transformer import PositionalEncoding
class ResTranformer(nn.Module):
def __init__(self, d_model=512, nhead=8, d_inner=2048, dropout=0.1, activation='relu', backbone_ln=2):
super().__... | python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/abinet/model.py | strhub/models/abinet/model.py | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, dataset_max_length: int, null_label: int):
super().__init__()
self.max_length = dataset_max_length + 1 # additional stop token
self.null_label = null_label
def _get_length(self, logit, dim=-1):
"""... | python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/abinet/resnet.py | strhub/models/abinet/resnet.py | import math
from typing import Optional, Callable
import torch.nn as nn
from torchvision.models import resnet
class BasicBlock(resnet.BasicBlock):
def __init__(self, inplanes: int, planes: int, stride: int = 1, downsample: Optional[nn.Module] = None,
groups: int = 1, base_width: int = 64, dilat... | python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/abinet/system.py | strhub/models/abinet/system.py | # Scene Text Recognition Model Hub
# Copyright 2022 Darwin Bautista
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by ... | python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/abinet/model_language.py | strhub/models/abinet/model_language.py | import torch.nn as nn
from torch.nn import TransformerDecoder
from .model import Model
from .transformer import PositionalEncoding, TransformerDecoderLayer
class BCNLanguage(Model):
def __init__(self, dataset_max_length, null_label, num_classes, d_model=512, nhead=8, d_inner=2048, dropout=0.1,
a... | python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/abinet/model_abinet_iter.py | strhub/models/abinet/model_abinet_iter.py | import torch
from torch import nn
from .model_alignment import BaseAlignment
from .model_language import BCNLanguage
from .model_vision import BaseVision
class ABINetIterModel(nn.Module):
def __init__(self, dataset_max_length, null_label, num_classes, iter_size=1,
d_model=512, nhead=8, d_inner=2... | python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/abinet/__init__.py | strhub/models/abinet/__init__.py | r"""
Fang, Shancheng, Hongtao, Xie, Yuxin, Wang, Zhendong, Mao, and Yongdong, Zhang.
"Read Like Humans: Autonomous, Bidirectional and Iterative Language Modeling for Scene Text Recognition." .
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (pp. 7098-7107).2021.
https://arxi... | python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/abinet/attention.py | strhub/models/abinet/attention.py | import torch
import torch.nn as nn
from .transformer import PositionalEncoding
class Attention(nn.Module):
def __init__(self, in_channels=512, max_length=25, n_feature=256):
super().__init__()
self.max_length = max_length
self.f0_embedding = nn.Embedding(max_length, in_channels)
... | python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/abinet/model_vision.py | strhub/models/abinet/model_vision.py | from torch import nn
from .attention import PositionAttention, Attention
from .backbone import ResTranformer
from .model import Model
from .resnet import resnet45
class BaseVision(Model):
def __init__(self, dataset_max_length, null_label, num_classes,
attention='position', attention_mode='neares... | python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/abinet/transformer.py | strhub/models/abinet/transformer.py | import math
import torch
import torch.nn.functional as F
from torch import nn
from torch.nn.modules.transformer import _get_activation_fn
class TransformerDecoderLayer(nn.Module):
r"""TransformerDecoderLayer is made up of self-attn, multi-head-attn and feedforward network.
This standard decoder layer is base... | python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/parseq/system.py | strhub/models/parseq/system.py | # Scene Text Recognition Model Hub
# Copyright 2022 Darwin Bautista
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by ... | python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/parseq/modules.py | strhub/models/parseq/modules.py | # Scene Text Recognition Model Hub
# Copyright 2022 Darwin Bautista
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by ... | python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/parseq/__init__.py | strhub/models/parseq/__init__.py | python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false | |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/vitstr/model.py | strhub/models/vitstr/model.py | """
Implementation of ViTSTR based on timm VisionTransformer.
TODO:
1) distilled deit backbone
2) base deit backbone
Copyright 2021 Rowel Atienza
"""
import os
import torch
import torch.nn as nn
from pytorch_lightning.utilities import rank_zero_info
from timm.models.vision_transformer import VisionTransformer
class... | python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/vitstr/system.py | strhub/models/vitstr/system.py | # Scene Text Recognition Model Hub
# Copyright 2022 Darwin Bautista
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by ... | python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/vitstr/__init__.py | strhub/models/vitstr/__init__.py | r"""
Atienza, Rowel. "Vision Transformer for Fast and Efficient Scene Text Recognition."
In International Conference on Document Analysis and Recognition (ICDAR). 2021.
https://arxiv.org/abs/2105.08582
All source files, except `system.py`, are based on the implementation listed below,
and hence are released under the... | python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/crnn/model.py | strhub/models/crnn/model.py | import torch.nn as nn
from strhub.models.modules import BidirectionalLSTM
class CRNN(nn.Module):
def __init__(self, img_h, nc, nclass, nh, leaky_relu=False):
super().__init__()
assert img_h % 16 == 0, 'img_h has to be a multiple of 16'
ks = [3, 3, 3, 3, 3, 3, 2]
ps = [1, 1, 1, 1... | python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/crnn/system.py | strhub/models/crnn/system.py | # Scene Text Recognition Model Hub
# Copyright 2022 Darwin Bautista
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by ... | python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/crnn/__init__.py | strhub/models/crnn/__init__.py | r"""
Shi, Baoguang, Xiang Bai, and Cong Yao.
"An end-to-end trainable neural network for image-based sequence recognition and its application to scene text recognition."
IEEE transactions on pattern analysis and machine intelligence 39, no. 11 (2016): 2298-2304.
https://arxiv.org/abs/1507.05717
All source files, exce... | python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/trba/prediction.py | strhub/models/trba/prediction.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class Attention(nn.Module):
def __init__(self, input_size, hidden_size, num_class, num_char_embeddings=256):
super().__init__()
self.attention_cell = AttentionCell(input_size, hidden_size, num_char_embeddings)
self.hidden_... | python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/trba/model.py | strhub/models/trba/model.py | import torch.nn as nn
from strhub.models.modules import BidirectionalLSTM
from .feature_extraction import ResNet_FeatureExtractor
from .prediction import Attention
from .transformation import TPS_SpatialTransformerNetwork
class TRBA(nn.Module):
def __init__(self, img_h, img_w, num_class, num_fiducial=20, input_... | python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/trba/system.py | strhub/models/trba/system.py | # Scene Text Recognition Model Hub
# Copyright 2022 Darwin Bautista
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by ... | python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/trba/feature_extraction.py | strhub/models/trba/feature_extraction.py | import torch.nn as nn
from torchvision.models.resnet import BasicBlock
class ResNet_FeatureExtractor(nn.Module):
""" FeatureExtractor of FAN (http://openaccess.thecvf.com/content_ICCV_2017/papers/Cheng_Focusing_Attention_Towards_ICCV_2017_paper.pdf) """
def __init__(self, input_channel, output_channel=512):... | python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/trba/__init__.py | strhub/models/trba/__init__.py | r"""
Baek, Jeonghun, Geewook Kim, Junyeop Lee, Sungrae Park, Dongyoon Han, Sangdoo Yun, Seong Joon Oh, and Hwalsuk Lee.
"What is wrong with scene text recognition model comparisons? dataset and model analysis."
In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 4715-4723. 2019.
https://arx... | python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |