repo (string, length 1–99) | file (string, length 13–215) | code (string, length 12–59.2M) | file_length (int64, 12–59.2M) | avg_line_length (float64, 3.82–1.48M) | max_line_length (int64, 12–2.51M) | extension_type (string, 1 class: py)
|---|---|---|---|---|---|---|
PyLaia | PyLaia-master/tests/decoders/ctc_greedy_decoder_test.py | import unittest
import torch
from laia.decoders import CTCGreedyDecoder
class CTCGreedyDecoderTest(unittest.TestCase):
def test(self):
x = torch.tensor(
[
[[1.0, 3.0, -1.0, 0.0]],
[[-1.0, 2.0, -2.0, 3.0]],
[[1.0, 5.0, 9.0, 2.0]],
... | 3,819 | 31.10084 | 86 | py |
PyLaia | PyLaia-master/tests/decoders/ctc_language_decoder_test.py | from pathlib import Path
import pytest
import torch
from laia.decoders import CTCLanguageDecoder
tokens = """<ctc>
a
e
h
i
s
t
.
<unk>
<space>"""
lexicon = """<ctc> <ctc>
a a
e e
h h
i i
s s
t t
. .
<unk> <unk>
<space> <space>"""
arpa_lm = """\\data\\
ngram 1=10
ngram 2=14
\\1-grams:
-1.09691\t.\t-0.2648178
-1.09... | 3,112 | 24.941667 | 156 | py |
PyLaia | PyLaia-master/tests/scripts/htr/netout_test.py | import pytest
import torch
from conftest import call_script
from pytorch_lightning import seed_everything
from laia.common.arguments import CommonArgs
from laia.common.saver import ModelSaver
from laia.dummies import DummyMNISTLines, DummyModel
from laia.scripts.htr import netout as script
# TODO: fix test with npro... | 2,996 | 31.934066 | 84 | py |
PyLaia | PyLaia-master/tests/scripts/htr/conftest.py | import ssl
import subprocess
import sys
from typing import List, Optional, Tuple
import pytest
from torchvision.datasets.utils import download_and_extract_archive, download_url
from laia import __root__
def call_script(
file: str, args: List[str], timeout: Optional[int] = 60 * 3
) -> Tuple[str, str]:
# To t... | 2,251 | 36.533333 | 94 | py |
PyLaia | PyLaia-master/tests/scripts/htr/train_ctc_test.py | import pytest
import torch
from conftest import call_script
from packaging import version
from pytorch_lightning import seed_everything
from laia.common.arguments import (
CommonArgs,
DataArgs,
OptimizerArgs,
SchedulerArgs,
TrainArgs,
TrainerArgs,
)
from laia.common.saver import ModelSaver
from... | 8,003 | 30.888446 | 88 | py |
PyLaia | PyLaia-master/tests/scripts/htr/decode_ctc_test.py | import shutil
from io import StringIO
from unittest import mock
import pytest
import torch
from conftest import call_script
from packaging import version
from pytorch_lightning import seed_everything
from laia.common.arguments import CommonArgs, DataArgs, DecodeArgs
from laia.common.saver import ModelSaver
from laia.... | 7,709 | 36.067308 | 105 | py |
PyLaia | PyLaia-master/tests/loggers/epoch_csv_logger_test.py | import pandas as pd
import pytest
import pytorch_lightning as pl
from laia.dummies import DummyEngine, DummyMNIST, DummyTrainer
from laia.loggers.epoch_csv_logger import EpochCSVLogger, EpochCSVWriter
@pytest.mark.parametrize(
["dicts", "key", "expected"],
[
([], None, []),
([{}], None, []),
... | 3,623 | 32.247706 | 86 | py |
PyLaia | PyLaia-master/tests/engine/feeder_test.py | import pytest
import torch
from laia.engine import ImageFeeder, ItemFeeder
def test_item_feeder():
feeder = ItemFeeder("foo")
expected = "bar"
x = {"foo": expected, "baz": 1}
assert feeder(x) == expected
def test_item_feeder_raises():
feeder = ItemFeeder("foo")
with pytest.raises(AssertionE... | 818 | 22.4 | 69 | py |
PyLaia | PyLaia-master/tests/engine/engine_module_test.py | from logging import DEBUG
import pytest
import torch
from laia.common.arguments import OptimizerArgs, SchedulerArgs
from laia.dummies import DummyMNIST, DummyModel, DummyTrainer
from laia.engine import EngineModule
from laia.engine.engine_exception import EngineException
from laia.losses import CTCLoss
@pytest.mark... | 2,506 | 30.3375 | 101 | py |
PyLaia | PyLaia-master/tests/utils/checks_test.py | from logging import DEBUG, INFO
import pytest
import torch
from laia.utils import check_tensor
@pytest.mark.parametrize("raise_exception", [True, False])
def test_check_tensor(caplog, raise_exception):
tensor = torch.tensor([1, float("inf"), 3])
caplog.set_level(DEBUG)
if raise_exception:
with p... | 999 | 33.482759 | 84 | py |
PyLaia | PyLaia-master/tests/utils/kaldi_test.py | import io
import pytest
import torch
from laia.utils import kaldi
@pytest.mark.parametrize("dtype", [torch.float, torch.double])
@pytest.mark.parametrize(
"device", ["cpu", "cuda"] if torch.cuda.is_available() else ["cpu"]
)
def test_write_binary_matrix(dtype, device):
f = io.BytesIO()
x = torch.tensor(... | 3,550 | 32.186916 | 84 | py |
PyLaia | PyLaia-master/tests/data/padding_collater_test.py | import unittest
import numpy as np
import pytest
import torch
from laia.data import PaddedTensor, PaddingCollater
@pytest.mark.parametrize(
["data", "sizes", "match"],
[
(None, None, None),
(torch.empty(1), None, None),
(torch.empty(1), torch.tensor(1), r"PaddedTensor.sizes must have... | 5,549 | 35.754967 | 88 | py |
PyLaia | PyLaia-master/tests/data/transforms/vision/vision_transforms_test.py | import math
import numpy as np
import pytest
import torch
from PIL import Image
from laia.data.transforms.vision import Convert, Invert, ToImageTensor
def test_invert():
t = Invert()
x = Image.new("L", (30, 40), color=0)
y = t(x)
assert y.size == x.size
assert y.mode == x.mode
y = np.asarray... | 3,967 | 27.342857 | 75 | py |
PyLaia | PyLaia-master/tests/losses/ctc_loss_test.py | import pytest
import torch
from torch.nn.functional import log_softmax
from laia.losses.ctc_loss import CTCLoss, get_valids_and_errors, transform_batch
def test_transform_batch():
with pytest.raises(
NotImplementedError, match=r"Not implemented for type <class 'NoneType'>"
):
transform_batch(... | 4,623 | 31.335664 | 95 | py |
PyLaia | PyLaia-master/laia/nn/mask_image_from_size.py | import torch
from nnutils_pytorch import mask_image_from_size
from laia.data import PaddedTensor
class MaskImageFromSize(torch.nn.Module):
def __init__(self, mask_value=0, inplace=False):
super().__init__()
self.inplace = inplace
self.mask_value = mask_value
def forward(self, x):
... | 652 | 26.208333 | 52 | py |
PyLaia | PyLaia-master/laia/nn/resnet.py | from typing import Optional, Sequence, Type, Union
import torch
import torch.nn as nn
from laia.data import PaddedTensor
def conv3x3(in_planes, out_planes, stride=1, groups=1):
"""3x3 convolution with padding"""
return nn.Conv2d(
in_planes,
out_planes,
kernel_size=3,
stride=s... | 9,153 | 29.615385 | 106 | py |
PyLaia | PyLaia-master/laia/nn/adaptive_pool_2d.py | import torch
from nnutils_pytorch import adaptive_avgpool_2d, adaptive_maxpool_2d
from laia.data import PaddedTensor
class AdaptivePool2d(torch.nn.Module):
def __init__(self, output_sizes, func):
super().__init__()
self._output_sizes = output_sizes
self._func = func
self._fixed_si... | 1,247 | 31 | 85 | py |
PyLaia | PyLaia-master/laia/nn/image_pooling_sequencer.py | import re
import torch
from laia.data import PaddedTensor
from laia.nn import AdaptiveAvgPool2d, AdaptiveMaxPool2d
from laia.nn.image_to_sequence import image_to_sequence
class ImagePoolingSequencer(torch.nn.Module):
def __init__(self, sequencer, columnwise=True):
super().__init__()
m = re.matc... | 2,933 | 36.139241 | 81 | py |
PyLaia | PyLaia-master/laia/nn/pyramid_maxpool_2d.py | from typing import Sequence, Union
import torch
from laia.data import PaddedTensor
from laia.nn.temporal_pyramid_maxpool_2d import _adaptive_maxpool_2d
class PyramidMaxPool2d(torch.nn.Module):
def __init__(self, levels: Sequence[int], use_nnutils: bool = True) -> None:
super().__init__()
self._l... | 1,007 | 27.8 | 80 | py |
PyLaia | PyLaia-master/laia/nn/image_to_sequence.py | import torch
from torch.nn.utils.rnn import pack_padded_sequence
from laia.data import PaddedTensor
def image_to_sequence(x, columnwise=True, return_packed=False):
x, xs = (x.data, x.sizes) if isinstance(x, PaddedTensor) else (x, None)
if x.dim() == 2:
x = x.view(1, 1, x.size(0), x.size(1))
elif... | 1,125 | 28.631579 | 86 | py |
PyLaia | PyLaia-master/laia/nn/temporal_pyramid_maxpool_2d.py | from typing import Sequence, Union
import torch
from nnutils_pytorch import adaptive_maxpool_2d
from laia.data import PaddedTensor
def _adaptive_maxpool_2d(batch_input, output_sizes, batch_sizes, use_nnutils):
if use_nnutils:
return adaptive_maxpool_2d(
batch_input=batch_input, output_sizes=... | 1,853 | 30.423729 | 87 | py |
PyLaia | PyLaia-master/laia/callbacks/decode.py | from typing import Callable, Optional, Union
import numpy as np
import pytorch_lightning as pl
from tqdm.auto import tqdm
from laia.decoders import CTCGreedyDecoder
from laia.utils import SymbolsTable
def compute_word_prob(symbols, hyp, prob, input_separator):
"""
Compute confidence score for each word.
... | 4,655 | 35.661417 | 87 | py |
PyLaia | PyLaia-master/laia/callbacks/progress_bar.py | import sys
from collections import defaultdict
from logging import INFO
from typing import Dict, Optional
import pytorch_lightning as pl
from pytorch_lightning.callbacks.progress import convert_inf
from tqdm.auto import tqdm
import laia.common.logging as log
from laia.callbacks.meters import Timer
class ProgressBar... | 7,441 | 35.841584 | 112 | py |
PyLaia | PyLaia-master/laia/callbacks/training_timer.py | import datetime
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_only
import laia.common.logging as log
from laia.callbacks.meters import Timer
_logger = log.get_logger(__name__)
class TrainingTimer(pl.Callback):
def __init__(self):
super().__init__()
self.tr_tim... | 1,148 | 27.725 | 64 | py |
PyLaia | PyLaia-master/laia/callbacks/segmentation.py | import sys
from typing import Callable, List, Optional, Tuple, Union
import pytorch_lightning as pl
from tqdm.auto import tqdm
from laia.decoders import CTCGreedyDecoder
from laia.utils import SymbolsTable
class Segmentation(pl.Callback):
def __init__(
self,
syms: Union[dict, SymbolsTable],
... | 3,357 | 34.347368 | 87 | py |
PyLaia | PyLaia-master/laia/callbacks/netout.py | from typing import Callable, List, Optional, Union
import pytorch_lightning as pl
import torch
from laia.losses.ctc_loss import transform_batch
from laia.utils import ArchiveLatticeWriter, ArchiveMatrixWriter
class Netout(pl.Callback):
def __init__(
self,
writers: List[Union[ArchiveMatrixWriter,... | 1,241 | 34.485714 | 88 | py |
PyLaia | PyLaia-master/laia/callbacks/progress_bar_gpu_stats.py | from typing import Dict, List, Tuple
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_only
from pytorch_lightning.utilities.exceptions import MisconfigurationException
import laia.common.logging as log
_logger = log.get_logger(__name__)
class ProgressBarGPUStats(pl.callbacks.GPUStat... | 2,252 | 34.203125 | 97 | py |
PyLaia | PyLaia-master/laia/callbacks/learning_rate.py | import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_only
import laia.common.logging as log
_logger = log.get_logger(__name__)
class LearningRate(pl.callbacks.LearningRateMonitor):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.last_values... | 1,363 | 33.1 | 73 | py |
PyLaia | PyLaia-master/laia/dummies/dummy_trainer.py | import pytorch_lightning as pl
class DummyTrainer(pl.Trainer):
def __init__(self, **kwargs):
defaults = {
"checkpoint_callback": False,
"logger": False,
"weights_summary": None,
"max_epochs": 1,
"limit_train_batches": 10,
"limit_val_b... | 529 | 26.894737 | 43 | py |
PyLaia | PyLaia-master/laia/dummies/dummy_plugin.py | from pytorch_lightning.plugins.ddp_plugin import DDPPlugin
import laia.common.logging as log
class DummyLoggingPlugin(DDPPlugin):
def __init__(self, log_filepath):
super().__init__()
self.log_filepath = log_filepath
self.setup_logging(self.log_filepath)
@staticmethod
def setup_lo... | 736 | 29.708333 | 76 | py |
PyLaia | PyLaia-master/laia/dummies/dummy_model.py | import torch
from laia.data import PaddedTensor
from laia.nn.image_to_sequence import image_to_sequence
class DummyModel(torch.nn.Module):
"""Dummy HTR model for tests
First, this does an adaptive average pooling converting each images to a
fixed output size of `adaptive_size`.
Then, the fixed-size... | 1,536 | 33.155556 | 87 | py |
PyLaia | PyLaia-master/laia/dummies/modules/dummy_engine.py | import pytorch_lightning as pl
import torch
from laia.dummies.dummy_model import DummyModel
from laia.losses import CTCLoss
class DummyEngine(pl.LightningModule):
def __init__(self):
super().__init__()
# 10 output labels: MNIST classes
self.model = DummyModel((3, 3), 10, horizontal=True)
... | 1,154 | 25.860465 | 79 | py |
PyLaia | PyLaia-master/laia/dummies/data_modules/dummy_mnist.py | import pytorch_lightning as pl
import torch
import torchvision
from laia import __root__
from laia.data.transforms.vision import ToImageTensor
class DummyMNIST(pl.LightningDataModule):
def __init__(self, batch_size: int = 64):
self.batch_size = batch_size
self.root = __root__ / "datasets"
... | 1,710 | 27.04918 | 73 | py |
PyLaia | PyLaia-master/laia/dummies/data_modules/dummy_mnist_lines.py | import shutil
from typing import List, Optional, Union
import numpy as np
import torch
import torchvision
from laia.data import PaddingCollater, TextImageFromTextTableDataset
from laia.dummies import DummyMNIST
from laia.utils import SymbolsTable
class DummyMNISTLines(DummyMNIST):
def __init__(
self,
... | 5,454 | 33.525316 | 86 | py |
PyLaia | PyLaia-master/laia/common/arguments.py | import inspect
from dataclasses import dataclass, field, make_dataclass
from enum import Enum
from os.path import join
from typing import Any, List, Optional, Tuple, Type, Union
import pytorch_lightning as pl
import torch
from jsonargparse.typing import (
ClosedUnitInterval,
NonNegativeFloat,
NonNegativeIn... | 12,569 | 34.210084 | 89 | py |
PyLaia | PyLaia-master/laia/common/saver.py | import inspect
import os
from typing import Any, Callable
import torch
from laia.common.logging import get_logger
_logger = get_logger(__name__)
class Saver:
def __call__(self, *args: Any, **kwargs: Any):
return self.save(*args, **kwargs)
def save(self, *args: Any, **kwargs: Any):
raise No... | 1,480 | 26.425926 | 78 | py |
PyLaia | PyLaia-master/laia/common/logging.py | import logging
import os
import sys
from enum import Enum
from typing import Optional
from pytorch_lightning.utilities import rank_zero_only
from tqdm.auto import tqdm
class TqdmStreamHandler(logging.StreamHandler):
"""
This handler uses tqdm.write to log so
logging messages don't break the tqdm bar.
... | 5,524 | 25.184834 | 87 | py |
PyLaia | PyLaia-master/laia/common/types.py | from typing import Callable, Sequence, Tuple, Union
import torch
Loss = Union[float, torch.FloatTensor]
ParamNd = Union[int, Sequence[int], torch.LongTensor]
Param2d = Union[int, Tuple[int, int], torch.LongTensor]
Module = Callable[..., torch.nn.Module]
| 258 | 22.545455 | 55 | py |
PyLaia | PyLaia-master/laia/common/loader.py | import os
from collections import OrderedDict
from glob import glob
from importlib import import_module
from io import BytesIO
from typing import Any, Callable, Optional, Union
import natsort as ns
import pytorch_lightning as pl
import torch
from laia.common.logging import get_logger
_logger = get_logger(__name__)
... | 4,733 | 32.574468 | 87 | py |
PyLaia | PyLaia-master/laia/models/htr/laia_crnn.py | from itertools import count
from typing import List, Sequence, Tuple, Type, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import PackedSequence
from laia.common.types import Param2d, ParamNd
from laia.data import PaddedTensor
from laia.models.htr import ConvBlock
fro... | 5,271 | 33.012903 | 87 | py |
PyLaia | PyLaia-master/laia/models/htr/conv_block.py | import math
from typing import Any, List, Optional, Tuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from laia.common.types import Param2d
from laia.data import PaddedTensor
from laia.nn.mask_image_from_size import mask_image_from_size
class ConvBlock(nn.Modul... | 4,367 | 31.355556 | 86 | py |
PyLaia | PyLaia-master/laia/models/htr/gated_crnn.py | from typing import List, Optional, Sequence, Union
import torch
from torch.nn.functional import dropout
from torch.nn.utils.rnn import PackedSequence
from laia.common.types import Module, Param2d
from laia.data import PaddedTensor
from laia.nn import ImagePoolingSequencer
class GatedConv2d(torch.nn.Module):
def... | 7,855 | 31.196721 | 88 | py |
PyLaia | PyLaia-master/laia/decoders/ctc_alignment.py | import numpy as np
def ctc_alignment(logpost_matrix, seq, ctc_sym=0):
"""Perform CTC forced alignment of the given sequence in the log-posteriors
matrix.
This obtains the most likely sequence of symbols (incl. CTC-blank symbols)
that generate the given sequence of symbols, according to the input matr... | 2,925 | 33.423529 | 79 | py |
PyLaia | PyLaia-master/laia/decoders/ctc_language_decoder.py | from typing import Any, Dict, List
import numpy as np
import torch
from torchaudio.models.decoder import ctc_decoder
from laia.losses.ctc_loss import transform_batch
class CTCLanguageDecoder:
"""
Intialize a CTC decoder with n-gram language modeling.
Args:
language_model_path (str): path to a Ke... | 3,134 | 35.882353 | 109 | py |
PyLaia | PyLaia-master/laia/decoders/ctc_greedy_decoder.py | from typing import Any, Dict, List
import torch
from laia.losses.ctc_loss import transform_batch
class CTCGreedyDecoder:
def __call__(
self, x: Any, segmentation: bool = False, apply_softmax: bool = True
) -> Dict[str, List]:
x, xs = transform_batch(x)
x = x.detach()
# Apply... | 2,517 | 34.971429 | 124 | py |
PyLaia | PyLaia-master/laia/decoders/ctc_nbest_decoder.py | import torch
from laia.losses.ctc_loss import transform_batch
class CTCNBestDecoder:
"""N-best decoder based on CTC output."""
def __init__(self, nbest):
assert isinstance(nbest, int) and nbest > 0
self._nbest = nbest
self._output = None
def __call__(self, x):
x, xs = tr... | 1,455 | 31.355556 | 86 | py |
PyLaia | PyLaia-master/laia/scripts/htr/train_ctc.py | #!/usr/bin/env python3
from typing import Any, Dict, List, Optional
import jsonargparse
import pytorch_lightning as pl
import torch
import laia.common.logging as log
from laia.callbacks import LearningRate, ProgressBar, ProgressBarGPUStats
from laia.common.arguments import (
CommonArgs,
DataArgs,
Optimize... | 6,569 | 30.73913 | 118 | py |
PyLaia | PyLaia-master/laia/scripts/htr/netout.py | #!/usr/bin/env python3
from os.path import join
from typing import Any, Dict, List, Optional
import jsonargparse
import pytorch_lightning as pl
import laia.common.logging as log
from laia.callbacks import Netout, ProgressBar
from laia.common.arguments import CommonArgs, DataArgs, NetoutArgs, TrainerArgs
from laia.com... | 4,232 | 29.673913 | 88 | py |
PyLaia | PyLaia-master/laia/scripts/htr/decode_ctc.py | #!/usr/bin/env python3
from typing import Any, Dict, List, Optional
import jsonargparse
import pytorch_lightning as pl
import laia.common.logging as log
from laia.callbacks import Decode, ProgressBar, Segmentation
from laia.common.arguments import CommonArgs, DataArgs, DecodeArgs, TrainerArgs
from laia.common.loader ... | 5,406 | 30.436047 | 88 | py |
PyLaia | PyLaia-master/laia/scripts/htr/create_model.py | #!/usr/bin/env python3
from typing import Any, Dict, List, Optional
import jsonargparse
import torch.nn as nn
from jsonargparse.typing import NonNegativeInt
from pytorch_lightning import seed_everything
import laia.common.logging as log
from laia.common.arguments import CommonArgs, CreateCRNNArgs
from laia.common.sav... | 4,303 | 32.364341 | 88 | py |
PyLaia | PyLaia-master/laia/loggers/epoch_csv_logger.py | import csv
import os
import re
from collections import defaultdict
from typing import Optional, Union
from pytorch_lightning.loggers.csv_logs import CSVLogger, ExperimentWriter
from pytorch_lightning.utilities import rank_zero_only
class EpochCSVWriter(ExperimentWriter):
def save(self, version: Optional[int] = N... | 3,108 | 28.894231 | 85 | py |
PyLaia | PyLaia-master/laia/engine/htr_engine_module.py | from typing import Any, Callable, Iterable, Optional
import torch
from laia.callbacks.meters import SequenceError, char_to_word_seq
from laia.common.arguments import OptimizerArgs, SchedulerArgs
from laia.decoders import CTCGreedyDecoder
from laia.engine import EngineModule
from laia.losses import CTCLoss
class HTR... | 3,334 | 30.761905 | 79 | py |
PyLaia | PyLaia-master/laia/engine/feeder.py | import torchvision
from laia.data import PaddedTensor
class Feeder:
"""This class is used to feed data to a model or loss."""
def __call__(self, x):
return self.feed(x)
def feed(self, x):
raise NotImplementedError("Abstract class.")
class ItemFeeder(Feeder):
"""Feed an element fro... | 2,269 | 27.734177 | 83 | py |
PyLaia | PyLaia-master/laia/engine/evaluator_module.py | from typing import Any, Callable, Optional
import pytorch_lightning as pl
import torch
from laia.engine.engine_exception import exception_catcher
class EvaluatorModule(pl.LightningModule):
def __init__(
self,
model: torch.nn.Module,
batch_input_fn: Optional[Callable] = None,
batc... | 1,001 | 27.628571 | 69 | py |
PyLaia | PyLaia-master/laia/engine/engine_module.py | from typing import Any, Callable, Iterator, Optional, Tuple
import pytorch_lightning as pl
import torch
from laia.common.arguments import OptimizerArgs, SchedulerArgs
from laia.common.types import Loss as LossT
from laia.engine.engine_exception import exception_catcher
from laia.losses.loss import Loss
from laia.util... | 6,157 | 35.654762 | 86 | py |
PyLaia | PyLaia-master/laia/engine/data_module.py | import multiprocessing
import random
from typing import Dict, List, Optional, Union
import numpy as np
import pytorch_lightning as pl
import torch
from torch.utils.data import DataLoader, DistributedSampler
import laia.common.logging as log
import laia.data.transforms as transforms
from laia.data import (
ImageFr... | 5,628 | 35.083333 | 86 | py |
PyLaia | PyLaia-master/laia/utils/checks.py | from logging import DEBUG
from typing import Optional
import torch
import laia.common.logging as log
def check_tensor(
tensor: torch.Tensor,
msg: Optional[str] = None,
name: Optional[str] = "laia",
raise_exception: bool = False,
**kwargs,
) -> bool:
"""
Checks if each element of a tensor... | 1,589 | 32.125 | 81 | py |
PyLaia | PyLaia-master/laia/utils/kaldi.py | import sys
from pathlib import Path
from typing import BinaryIO, Iterable, TextIO, Tuple, Union
import numpy as np
import torch
def prepare_mat(mat: Union[torch.Tensor, np.ndarray]) -> np.ndarray:
if isinstance(mat, torch.Tensor):
assert mat.dim() == 2, "Input tensor must have 2 dimensions"
# TOD... | 4,289 | 30.544118 | 90 | py |
PyLaia | PyLaia-master/laia/data/image_dataset.py | from typing import Any, Callable, Dict, List, Optional
import torch
from PIL import Image
class ImageDataset(torch.utils.data.Dataset):
def __init__(
self, imgs: List[str], transform: Optional[Callable[[Image.Image], Any]] = None
):
assert isinstance(imgs, (list, tuple))
super().__ini... | 779 | 29 | 87 | py |
PyLaia | PyLaia-master/laia/data/padding_collater.py | from typing import (
Any,
Callable,
List,
Mapping,
NamedTuple,
Optional,
Sequence,
Tuple,
Union,
)
import numpy as np
import torch
class PaddedTensor(NamedTuple):
data: torch.Tensor
sizes: torch.Tensor
@classmethod
def build(cls, data: torch.Tensor, sizes: torch.T... | 3,758 | 31.405172 | 87 | py |
PyLaia | PyLaia-master/laia/data/unpadded_distributed_sampler.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2020, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This progra... | 3,458 | 44.513158 | 86 | py |
PyLaia | PyLaia-master/laia/data/transforms/transforms.py | from typing import Callable, Sequence, Tuple, Union
import numpy as np
import torchvision
class RandomProbChoice(torchvision.transforms.transforms.RandomTransforms):
"""Apply a randomly transformation chosen from a given set with some probability."""
def __init__(
self, transforms: Sequence[Union[Ca... | 1,341 | 28.822222 | 88 | py |
PyLaia | PyLaia-master/laia/data/transforms/vision/transforms.py | from typing import Callable, Optional
import torch
import torchvision
from PIL import Image, ImageOps
class Invert:
"""Invert the colors of a PIL image with the given probability."""
def __call__(self, img: Image) -> Image:
return ImageOps.invert(img)
def __repr__(self) -> str:
return f... | 4,722 | 32.496454 | 87 | py |
PyLaia | PyLaia-master/laia/losses/ctc_loss.py | import itertools
from typing import Dict, List, Optional, Tuple
import torch
import laia.common.logging as log
from laia.losses.loss import Loss
_logger = log.get_logger(__name__)
def transform_batch(batch):
# size: T x N x C
if isinstance(batch, torch.nn.utils.rnn.PackedSequence):
x, xs = torch.nn... | 4,521 | 34.328125 | 83 | py |
PyLaia | PyLaia-master/laia/losses/loss.py | import torch
class Loss(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, output, target, **kwargs):
raise NotImplementedError
| 179 | 17 | 48 | py |
verifair | verifair-master/model/quickdraw_dis_builder/python/ensemble_method_func.py | from sklearn.model_selection import train_test_split
import pandas as pd
import numpy as np
import time
import pickle
np.random.seed(32113)
def data_preparer_ensemble(df1,df2,df3,df4, lbl = 'word', countries=['US','BR','RU','KR'],\
words=['cat','tiger','lion','dog'],sample=30000, limit = 5000):
... | 6,424 | 38.906832 | 103 | py |
verifair | verifair-master/model/quickdraw_dis_builder/python/cnn_func.py | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
np.random.seed(32113)
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.layers.convo... | 15,064 | 37.236041 | 113 | py |
GraphWriter | GraphWriter-master/vectorize.py | import torch
from collections import Counter
import dill
from torchtext import data
import pargs as arg
from copy import copy
class dataset:
def __init__(self, args):
args.path = args.datadir + args.data
self.args = args
'''
if args.loadvocab:
with open(args.datadir+"/"+args.loadvocab,'rb') as... | 10,459 | 30.601208 | 163 | py |
GraphWriter | GraphWriter-master/pargs.py | import torch
import argparse
def dynArgs(args,ds):
args.ntoks = len(ds.OUTP.vocab)
args.tgttoks = len(ds.TGT.vocab)
args.ninput = len(ds.INP.vocab)
args.vtoks = len(ds.ENT.itos)
args.rtoks = len(ds.REL.itos)
args.starttok = ds.OUTP.vocab.stoi["<start>"]
args.dottok = ds.OUTP.vocab.stoi["."]
args.ent_vo... | 5,112 | 46.342593 | 174 | py |
GraphWriter | GraphWriter-master/lastDataset.py | import torch
from collections import Counter
import dill
from torchtext import data
import pargs as arg
from copy import copy
class dataset:
def __init__(self, args):
args.path = args.datadir + args.data
print("Loading Data from ",args.path)
self.args = args
self.mkVocabs(args)
print("Vocab size... | 11,163 | 32.42515 | 135 | py |
GraphWriter | GraphWriter-master/generator.py | import torch
import argparse
from time import time
from lastDataset import dataset
from models.newmodel import model
from pargs import pargs,dynArgs
#import utils.eval as evalMetrics
def tgtreverse(tgts,entlist,order):
entlist = entlist[0]
order = [int(x) for x in order[0].split(" ")]
tgts = tgts.split(" ")
k... | 2,094 | 23.647059 | 80 | py |
GraphWriter | GraphWriter-master/train.py | import sys
from random import shuffle
import os
from math import exp
import torch
from torch import nn
from torch.nn import functional as F
from lastDataset import dataset
from pargs import pargs,dynArgs
from models.newmodel import model
def update_lr(o,args,epoch):
if epoch%args.lrstep == 0:
o.param_groups[0]['... | 3,690 | 27.612403 | 119 | py |
GraphWriter | GraphWriter-master/models/splan.py | import torch
from torch import nn
from torch.nn import functional as F
from models.attention import MultiHeadAttention
class splanner(nn.Module):
def __init__(self,args):
super().__init__()
asz = 50
self.emb = nn.Parameter(torch.zeros(1,3,asz))
nn.init.xavier_normal_(self.emb)
self.gru = nn.GRUCe... | 2,330 | 26.423529 | 76 | py |
GraphWriter | GraphWriter-master/models/graphAttn.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from models.layers import GraphAttentionLayer, SpGraphAttentionLayer
class GAT(nn.Module):
def __init__(self, nfeat, nhid, nclass, dropout, alpha, nheads):
"""Dense version of GAT."""
super(GAT, self).__init__()
self.dropou... | 2,260 | 40.87037 | 126 | py |
GraphWriter | GraphWriter-master/models/layers.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class GraphAttentionLayer(nn.Module):
"""
Simple GAT layer, similar to https://arxiv.org/abs/1710.10903
"""
def __init__(self, in_features, out_features, dropout, alpha, concat=True):
super(GraphAttentionLayer, self).__init__(... | 3,735 | 33.592593 | 119 | py |
GraphWriter | GraphWriter-master/models/beam.py | import torch
from torch import nn
from torch.nn import functional as F
tt = torch.cuda if torch.cuda.is_available() else torch
class beam_obj():
def __init__(self,initword,initscore,h,c,last):
self.words = [initword]
self.score = initscore
self.h = h
self.c = c
self.last = last
self.firstwor... | 2,911 | 27.54902 | 96 | py |
GraphWriter | GraphWriter-master/models/graph_encoder.py | import torch
import math
from torch import nn
from torch.nn import functional as F
from models.graphAttn import GAT
from allennlp.modules.seq2seq_encoders.stacked_self_attention import StackedSelfAttentionEncoder
from models.attention import MultiHeadAttention
def gelu(x):
return 0.5 * x * (1 + torch.tanh(math.sqrt... | 2,979 | 36.721519 | 131 | py |
GraphWriter | GraphWriter-master/models/list_encoder.py | import torch
import numpy as np
from torch import nn
from torch.nn import functional as F
from torch.nn.utils.rnn import pack_padded_sequence,pad_packed_sequence
from allennlp.modules.elmo import Elmo
class lseq_encode(nn.Module):
def __init__(self,args,vocab=None,toks=None):
super().__init__()
if vocab:
... | 2,370 | 33.362319 | 102 | py |
GraphWriter | GraphWriter-master/models/newmodel.py | import torch
from torch import nn
from models.attention import MultiHeadAttention, MatrixAttn
from models.list_encoder import list_encode, lseq_encode
from models.last_graph import graph_encode
from models.beam import Beam
from models.splan import splanner
class model(nn.Module):
def __init__(self,args):
super()... | 8,137 | 32.767635 | 98 | py |
GraphWriter | GraphWriter-master/models/attention.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class MatrixAttn(nn.Module):
def __init__(self,linin,linout):
super().__init__()
self.attnlin = nn.Linear(linin,linout)
def get_device(self):
# return the device of the tensor, either "cpu"
# or number specifiing the index of gpu... | 10,747 | 36.712281 | 103 | py |
GraphWriter | GraphWriter-master/models/attn.py | import torch
from torch import nn
from torch.nn import functional as F
class attn(nn.Module):
def __init__(self,linin,linout):
super(attn, self).__init__()
self.attnlin = nn.Linear(linin,linout)
def forward(self,dec,emb):
emb,emask = emb #; elen = elen.cuda()
emask = (emask == 0).unsqueeze(1)
... | 678 | 29.863636 | 89 | py |
GraphWriter | GraphWriter-master/models/last_graph.py | import torch
import math
from torch import nn
from torch.nn import functional as F
from models.graphAttn import GAT
from models.attention import MultiHeadAttention
def gelu(x):
return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
class Block(nn.Module):
def __init__(self,a... | 3,660 | 34.543689 | 130 | py |
GraphWriter | GraphWriter-master/models/gat.py | import torch
import numpy as np
from torch import nn
from beam import Beam
import models.encoders as encoders
from models.attn import attn
from allennlp.modules.seq2seq_encoders.stacked_self_attention import StackedSelfAttentionEncoder
class model(nn.Module):
def __init__(self,args):
super().__init__()
self.... | 5,703 | 34.65 | 105 | py |
GraphWriter | GraphWriter-master/models/encoders.py | import torch
from torch import nn
from torch.nn import functional as F
from torch.nn.utils.rnn import pack_padded_sequence,pad_packed_sequence
from allennlp.modules.elmo import Elmo
from models.graphAttn import GAT
from allennlp.modules.seq2seq_encoders.stacked_self_attention import StackedSelfAttentionEncoder
class e... | 5,343 | 34.865772 | 111 | py |
catboost | catboost-master/catboost/benchmarks/gpu_vs_cpu_training_speed/extract_scores_xgboost.py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
__author__ = "noxoomo"
__email__ = "noxoomo@yandex-team.ru"
import json
import subprocess
import os
import sys
from subprocess import Popen
import subprocess
import argparse
import os
import os.path
import pandas as pd
import numpy as np
if __name__ == '__main__':
pa... | 2,065 | 29.835821 | 101 | py |
catboost | catboost-master/catboost/benchmarks/gpu_vs_cpu_training_speed/run_experiment_xgboost.py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
__author__ = "noxoomo"
__email__ = "noxoomo@yandex-team.ru"
import json
import subprocess
import os
import sys
from subprocess import Popen
import subprocess
import argparse
import os
import os.path
import pandas as pd
import numpy as np
xgboost_path = "./xgboost"
fit_tem... | 8,729 | 40.179245 | 131 | py |
catboost | catboost-master/catboost/benchmarks/training_speed/plot.py | import argparse
import json
import os
import numpy as np
from matplotlib import pyplot as plt
from log_parser import read_results
FONT_DICT = {'fontsize': 20}
FIGURE_SIZE = (10, 5)
def plot_time_per_iter(tracks, figsize=FIGURE_SIZE, title=None, save_path='time_per_iter.png'):
fig = plt.figure(figsize=figsize)
... | 9,917 | 31.732673 | 118 | py |
catboost | catboost-master/catboost/benchmarks/training_speed/generate_report.py | # coding=utf-8
import argparse
import json
import numpy as np
import pandas as pd
from log_parser import read_results
def calculate_statistics(tracks, niter):
niter -= 1
best_track = None
best_quality = np.inf
best_iter = -1
median = []
low = []
high = []
total = []
for track ... | 7,237 | 27.384314 | 115 | py |
catboost | catboost-master/catboost/benchmarks/training_speed/log_parser.py | import json
import os
import re
from collections import namedtuple
import numpy as np
ALGORITHMS = [method + '-' + device_type
for device_type in ['CPU', 'GPU']
for method in ['catboost', 'xgboost', 'lightgbm']]
TIME_REGEX = r'Time: \[\s*(\d+\.?\d*)\s*\]\t'
ELAPSED_REGEX = re.compile(r'El... | 7,140 | 33.004762 | 133 | py |
catboost | catboost-master/catboost/benchmarks/training_speed/learners.py | # This file is modified version of benchmark.py.
# benchmark.py was released by RAMitchell (Copyright (c) 2018 Rory Mitchell) under MIT License
# and available at https://github.com/RAMitchell/GBM-Benchmarks/blob/master/benchmark.py
# License text is available at https://github.com/RAMitchell/GBM-Benchmarks/blob/master... | 8,340 | 28.578014 | 111 | py |
catboost | catboost-master/catboost/benchmarks/kaggle/rossmann-store-sales/xgboost_experiment_sklearn_grid_cv.py | #!/usr/bin/env python
import os.path
import numpy as np
import config
import experiment_lib
import xgboost as xgb
class XGBoostExperimentGridSearchCV(experiment_lib.ExperimentGridSearchCV):
def __init__(self, **kwargs):
super(XGBoostExperimentGridSearchCV, self).__init__(**kwargs)
def get_estima... | 1,084 | 26.820513 | 102 | py |
catboost | catboost-master/catboost/benchmarks/kaggle/rossmann-store-sales/xgboost_experiment_sklearn_random_cv.py | #!/usr/bin/env python
import os.path
import numpy as np
import scipy.stats
import config
import experiment_lib
import xgboost as xgb
class XGBoostExperimentRandomSearchCV(experiment_lib.ExperimentRandomSearchCV):
def __init__(self, **kwargs):
super(XGBoostExperimentRandomSearchCV, self).__init__(**kw... | 1,127 | 27.2 | 104 | py |
catboost | catboost-master/catboost/benchmarks/kaggle/rossmann-store-sales/experiment_hyperopt.py | #!/usr/bin/env python2
import argparse
import os
import pickle
import sys
import time
from hyperopt import hp, fmin, tpe, Trials, STATUS_OK, STATUS_FAIL
import numpy as np
import pandas as pd
from sklearn.model_selection._split import TimeSeriesSplit
import catboost as cb
import lightgbm as lgb
import xgboost as xgb... | 22,288 | 39.088129 | 125 | py |
catboost | catboost-master/catboost/benchmarks/kaggle/rossmann-store-sales/xgboost_early_stopping.py | #!/usr/bin/env python
import os.path
import config
import experiment_lib
import xgboost as xgb
class XGBoostExperimentEarlyStopping(experiment_lib.ExperimentEarlyStopping):
def __init__(self, **kwargs):
super(XGBoostExperimentEarlyStopping, self).__init__(**kwargs)
def get_estimator(self, cat_col... | 1,463 | 28.877551 | 106 | py |
catboost | catboost-master/catboost/benchmarks/quality_benchmarks/run_default.py | import sys, argparse
from experiment import Experiment
from datetime import datetime
import numpy as np
import pickle
import os
def createParser():
parser = argparse.ArgumentParser()
parser.add_argument('bst', choices=['xgb', 'lgb', 'cab'])
parser.add_argument('learning_task', choices=['classification', 'r... | 3,960 | 43.011111 | 112 | py |
catboost | catboost-master/catboost/benchmarks/quality_benchmarks/run.py | import sys, argparse
def createParser():
parser = argparse.ArgumentParser()
parser.add_argument('bst', choices=['xgb', 'lgb', 'cab'])
parser.add_argument('learning_task', choices=['classification', 'regression'])
parser.add_argument('-t', '--n_estimators', type=int, default=5000)
parser.add_argumen... | 1,531 | 41.555556 | 103 | py |
catboost | catboost-master/catboost/benchmarks/quality_benchmarks/xgboost_experiment.py | import xgboost as xgb
from hyperopt import hp
from experiment import Experiment
class XGBExperiment(Experiment):
def __init__(self, learning_task, n_estimators=5000, max_hyperopt_evals=50,
counters_sort_col=None, holdout_size=0,
train_path=None, test_path=None, cd_path=None, o... | 2,717 | 43.557377 | 98 | py |
catboost | catboost-master/catboost/benchmarks/ranking/eval_params.py | import argparse
import datetime
import json
import os
from copy import deepcopy
from sklearn.model_selection import ParameterGrid
from models import *
from utils import read_dataset
RANDOM_SEED = 0
def argmin(fn, space):
best_score = np.NINF
best_params = {}
for params in ParameterGrid(space):
... | 5,220 | 28.497175 | 114 | py |
catboost | catboost-master/catboost/benchmarks/ranking/models.py | from catboost import CatBoost, Pool
from collections import Counter
from utils import mean_ndcg
import lightgbm as lgb
import xgboost as xgb
class Data:
def __init__(self, train, test, RankerType):
self.X_train = train[0]
self.y_train = train[1]
self.queries_train = train[2]
self.... | 3,801 | 31.495726 | 102 | py |
catboost | catboost-master/catboost/python-package/catboost/widget/callbacks.py | try:
from xgboost.callback import TrainingCallback as XGBTrainingCallback
except:
class XGBTrainingCallback:
pass
from IPython.display import display
from .metrics_plotter import MetricsPlotter
class XGBPlottingCallback(XGBTrainingCallback):
'''XGBoost callback with metrics plotting widget from ... | 3,521 | 32.226415 | 82 | py |
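The three numeric columns are per-row statistics over the `code` cell: total character count, mean line length, and longest line; `extension_type` is constant (`py`). Below is a minimal sketch of how such rows could be recomputed and filtered. It assumes a hypothetical local JSONL export named `code_rows.jsonl` with one object per table row; that file name, the helper names, and the 10,000-character threshold are illustrative, and the exact statistic definitions (e.g., whether newlines are counted) are assumptions, not taken from the dataset itself.

```python
# Sketch only: one plausible reading of the table's per-row statistics,
# plus a filter over rows. "code_rows.jsonl" is a hypothetical local
# export (one JSON object per row); it is not part of the dataset.
import json
from typing import Iterator, Tuple


def code_stats(code: str) -> Tuple[int, float, int]:
    """Return (file_length, avg_line_length, max_line_length) for a source string."""
    lines = code.splitlines() or [""]
    lengths = [len(line) for line in lines]
    return len(code), sum(lengths) / len(lengths), max(lengths)


def iter_short_python_files(path: str = "code_rows.jsonl",
                            max_chars: int = 10_000) -> Iterator[Tuple[str, str]]:
    """Yield (repo, file) pairs for .py rows of at most max_chars characters."""
    with open(path) as fh:
        for raw in fh:
            row = json.loads(raw)
            if row["extension_type"] == "py" and row["file_length"] <= max_chars:
                yield row["repo"], row["file"]


if __name__ == "__main__":
    print(code_stats("import torch\nx = torch.zeros(3)\n"))  # -> (32, 15.0, 18)
```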