input | output
|---|---|
from typing import Any, Dict
import torch
from torchvision import datapoints
from torchvision.transforms.v2 import functional as F, Transform
class UniformTemporalSubsample(Transform):
"""[BETA] Uniformly subsample ``num_samples`` indices from the temporal dimension of the video.
.. v2betastatus:: UniformTe... | from typing import Any, Dict
from torchvision import datapoints
from torchvision.transforms.v2 import functional as F, Transform
from torchvision.transforms.v2.utils import is_simple_tensor
class UniformTemporalSubsample(Transform):
"""[BETA] Uniformly subsample ``num_samples`` indices from the temporal dimensi... |
from __future__ import annotations
import json
import os
from typing import Callable
import torch
from safetensors.torch import load_model as load_safetensors_model
from safetensors.torch import save_model as save_safetensors_model
from torch import Tensor, nn
from sentence_transformers.util import fullname, import_... | from __future__ import annotations
import json
import os
import torch
from safetensors.torch import load_model as load_safetensors_model
from safetensors.torch import save_model as save_safetensors_model
from torch import Tensor, nn
from sentence_transformers.util import fullname, import_from_string
class Dense(nn... |
"""Experiment with different models."""
from __future__ import annotations
from collections.abc import Sequence
from typing import Optional
from langchain_core.language_models.llms import BaseLLM
from langchain_core.prompts.prompt import PromptTemplate
from langchain_core.utils.input import get_color_mapping, print_... | """Experiment with different models."""
from __future__ import annotations
from typing import List, Optional, Sequence
from langchain_core.language_models.llms import BaseLLM
from langchain_core.prompts.prompt import PromptTemplate
from langchain_core.utils.input import get_color_mapping, print_text
from langchain.... |
import os
import subprocess
import time
from typing import List
import docker
import pytest
from jina.logging.logger import JinaLogger
client = docker.from_env()
cur_dir = os.path.dirname(__file__)
@pytest.fixture()
def test_dir() -> str:
return cur_dir
@pytest.fixture
def logger():
return JinaLogger('do... | import os
import subprocess
import time
from typing import List
import docker
import pytest
from jina.logging.logger import JinaLogger
client = docker.from_env()
cur_dir = os.path.dirname(__file__)
@pytest.fixture()
def test_dir() -> str:
return cur_dir
@pytest.fixture
def logger():
return JinaLogger('doc... |
"""
This example runs a BiLSTM after the word embedding lookup. The output of the BiLSTM is then pooled,
for example with max-pooling (which gives a system like InferSent) or with mean-pooling.
Note, you can also pass BERT embeddings to the BiLSTM.
"""
from torch.utils.data import DataLoader
import math
from sentence... | """
This example runs a BiLSTM after the word embedding lookup. The output of the BiLSTM is then pooled,
for example with max-pooling (which gives a system like InferSent) or with mean-pooling.
Note, you can also pass BERT embeddings to the BiLSTM.
"""
from torch.utils.data import DataLoader
import math
from sentence_... |
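As a companion to the BiLSTM row above, a minimal sketch of the pooling it describes (assumed shapes, not the sentence-transformers modules):

```python
import torch
from torch import nn

# Run token embeddings through a BiLSTM, then pool over the time axis.
lstm = nn.LSTM(input_size=300, hidden_size=128, bidirectional=True, batch_first=True)
tokens = torch.rand(4, 20, 300)        # (batch, seq_len, embedding_dim)
out, _ = lstm(tokens)                  # (4, 20, 256): 2 * hidden_size
max_pooled = out.max(dim=1).values     # InferSent-style max-pooling -> (4, 256)
mean_pooled = out.mean(dim=1)          # mean-pooling -> (4, 256)
```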
_base_ = ['../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py']
# model settings
model = dict(
type='YOLOX',
backbone=dict(type='CSPDarknet', deepen_factor=0.33, widen_factor=0.5),
neck=dict(
type='YOLOXPAFPN',
in_channels=[128, 256, 512],
out_channels=128,
n... | _base_ = ['../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py']
# model settings
model = dict(
type='YOLOX',
backbone=dict(type='CSPDarknet', deepen_factor=0.33, widen_factor=0.5),
neck=dict(
type='YOLOXPAFPN',
in_channels=[128, 256, 512],
out_channels=128,
n... |
from docarray.predefined_document.image import Image
from docarray.predefined_document.mesh import Mesh3D
from docarray.predefined_document.point_cloud import PointCloud3D
from docarray.predefined_document.text import Text
__all__ = ['Text', 'Image', 'Mesh3D', 'PointCloud3D']
| from docarray.predefined_document.image import Image
from docarray.predefined_document.text import Text
__all__ = ['Text', 'Image']
|
"""Standard LangChain interface tests."""
from langchain_core.language_models import BaseChatModel
from langchain_tests.unit_tests import ( # type: ignore[import-not-found]
ChatModelUnitTests, # type: ignore[import-not-found]
)
from langchain_mistralai import ChatMistralAI
class TestMistralStandard(ChatModelU... | """Standard LangChain interface tests"""
from langchain_core.language_models import BaseChatModel
from langchain_tests.unit_tests import ( # type: ignore[import-not-found]
ChatModelUnitTests, # type: ignore[import-not-found]
)
from langchain_mistralai import ChatMistralAI
class TestMistralStandard(ChatModelUn... |
from typing import TYPE_CHECKING, Any, Dict, Type, TypeVar
from docarray.document.abstract_document import AbstractDocument
from docarray.document.base_node import BaseNode
if TYPE_CHECKING:
from docarray.proto import DocumentProto, NodeProto
try:
import torch # noqa: F401
except ImportError:
torch_imp... | from typing import TYPE_CHECKING, Any, Dict, Type, TypeVar
from docarray.document.abstract_document import AbstractDocument
from docarray.document.base_node import BaseNode
if TYPE_CHECKING:
from docarray.proto import DocumentProto, NodeProto
try:
import torch # noqa: F401
except ImportError:
torch_imp... |
# model settings
model = dict(
type='FasterRCNN',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32),
backbone=dict(
type='ResNet',
depth=50,
nu... | # model settings
preprocess_cfg = dict(
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True,
pad_size_divisor=32)
model = dict(
type='FasterRCNN',
preprocess_cfg=preprocess_cfg,
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_in... |
"""Simple reader that reads OSMmap data from overpass API."""
import random
import string
import warnings
from typing import List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
warnings.filterwarnings("ignore")
class OpenMap(BaseReader):
"""
Open... | """Simple reader that reads OSMmap data from overpass API."""
import random
import string
import warnings
from typing import List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
warnings.filterwarnings("ignore")
class OpenMap(BaseReader):
"""OpenMap R... |
_base_ = './mask-rcnn_r101_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
... | _base_ = './mask_rcnn_r101_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
... |
import operator
import uuid
from collections.abc import Sequence
from typing import Any, Optional, cast
from pydantic import Field
from langchain_core._api import beta
from langchain_core.callbacks import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_core.indexing import ... | import operator
import uuid
from collections.abc import Sequence
from typing import Any, Optional, cast
from pydantic import Field
from langchain_core._api import beta
from langchain_core.callbacks import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_core.indexing import ... |
from docarray.typing.embedding import Embedding
from docarray.typing.id import ID
from docarray.typing.tensor import Tensor, TorchTensor
from docarray.typing.url import AnyUrl, ImageUrl
__all__ = ['Tensor', 'Embedding', 'ImageUrl', 'AnyUrl', 'ID', 'TorchTensor']
| from docarray.typing.embedding import Embedding
from docarray.typing.id import ID
from docarray.typing.tensor import Tensor
from docarray.typing.url import AnyUrl, ImageUrl
__all__ = ['Tensor', 'Embedding', 'ImageUrl', 'AnyUrl', 'ID']
|
_base_ = 'faster-rcnn_r50-caffe_fpn_ms-1x_coco.py'
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[60000, 80000])
# Runner type
runner = dict(_delete_=True, type='IterBasedRunner', max_iters=90000)
checkpoint_config = dict(interval=1... | _base_ = 'faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py'
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[60000, 80000])
# Runner type
runner = dict(_delete_=True, type='IterBasedRunner', max_iters=90000)
checkpoint_config = dict(inter... |
# Copyright (c) OpenMMLab. All rights reserved.
import re
from mmengine.config import Config
def replace_cfg_vals(ori_cfg):
"""Replace the string "${key}" with the corresponding value.
Replace the "${key}" with the value of ori_cfg.key in the config. And
support replacing the chained ${key}. Such as, re... | # Copyright (c) OpenMMLab. All rights reserved.
import re
from mmengine.config import Config
def replace_cfg_vals(ori_cfg):
"""Replace the string "${key}" with the corresponding value.
Replace the "${key}" with the value of ori_cfg.key in the config. And
support replacing the chained ${key}. Such as, re... |
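The replace_cfg_vals docstring above describes "${key}" substitution with chained references. A toy sketch of the idea (bounded repeated passes over a flat dict, not the mmdet implementation):

```python
import re

def substitute_vars(cfg: dict) -> dict:
    # Resolve each "${key}" against the top-level config, repeating so
    # chained references like "${path}" -> "${root}/train" settle.
    pattern = re.compile(r"\$\{(\w+)\}")
    def resolve(value):
        if isinstance(value, str):
            for _ in range(10):  # bounded passes for chained ${key}
                new = pattern.sub(lambda m: str(cfg[m.group(1)]), value)
                if new == value:
                    break
                value = new
        return value
    return {k: resolve(v) for k, v in cfg.items()}

print(substitute_vars({"root": "data", "path": "${root}/train", "full": "${path}/a.json"}))
# {'root': 'data', 'path': 'data/train', 'full': 'data/train/a.json'}
```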
from dataclasses import dataclass
from typing import Callable, Dict
import torch
import torchaudio
from ._vggish_impl import _SAMPLE_RATE, VGGish as _VGGish, VGGishInputProcessor as _VGGishInputProcessor
def _get_state_dict():
path = torchaudio.utils.download_asset("models/vggish.pt")
return torch.load(pat... | from dataclasses import dataclass
from typing import Callable, Dict
import torch
import torchaudio
from ._vggish_impl import _SAMPLE_RATE, VGGish as _VGGish, VGGishInputProcessor as _VGGishInputProcessor
def _get_state_dict():
path = torchaudio.utils.download_asset("models/vggish.pt")
return torch.load(path... |
_base_ = ['../common/ms_3x_coco.py', '../_base_/models/faster-rcnn_r50_fpn.py']
| _base_ = [
'../common/mstrain_3x_coco.py', '../_base_/models/faster_rcnn_r50_fpn.py'
]
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.nn import average_pool
from keras.src.ops.nn import batch_normalization
from keras.src.ops.nn import binary_crossentropy
from keras.src.ops.nn import categorical_crossentropy
from... | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.nn import average_pool
from keras.src.ops.nn import batch_normalization
from keras.src.ops.nn import binary_crossentropy
from keras.src.ops.nn import categorical_crossentropy
from... |
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
model = dict(
backbone=dict(
frozen_stages=0,
norm_cfg=dict(type='SyncBN', requires_grad=True),
norm_eval=False,
... | _base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
model = dict(
backbone=dict(
frozen_stages=0,
norm_cfg=dict(type='SyncBN', requires_grad=True),
norm_eval=False,
... |
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.preprocessing import image as image
from keras.preprocessing import sequence as sequence
from keras.src.utils.image_dataset_utils import (
image_dataset_from_directory as image_datase... | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.api.preprocessing import image
from keras.api.preprocessing import sequence
from keras.src.utils.image_dataset_utils import image_dataset_from_directory
from keras.src.utils.text_dataset_... |
import os
import jwt # noqa
import pytest
from llama_index.core import Document
from llama_index.core.vector_stores.types import (
BasePydanticVectorStore,
FilterCondition,
FilterOperator,
MetadataFilter,
MetadataFilters,
)
from llama_index.vector_stores.deeplake import DeepLakeVectorStore
def t... | import os
import jwt # noqa
import pytest
from llama_index.core import Document
from llama_index.core.vector_stores.types import (
BasePydanticVectorStore,
FilterCondition,
FilterOperator,
MetadataFilter,
MetadataFilters,
)
from llama_index.vector_stores.deeplake import DeepLakeVectorStore
if os.... |
import os
from functools import lru_cache
from typing import Union
import ffmpeg
import numpy as np
import torch
import torch.nn.functional as F
from .utils import exact_div
# hard-coded audio hyperparameters
SAMPLE_RATE = 16000
N_FFT = 400
N_MELS = 80
HOP_LENGTH = 160
CHUNK_LENGTH = 30
N_SAMPLES = CHUNK_LENGTH * SA... | import os
from functools import lru_cache
from typing import Union
import ffmpeg
import numpy as np
import torch
import torch.nn.functional as F
from .utils import exact_div
# hard-coded audio hyperparameters
SAMPLE_RATE = 16000
N_FFT = 400
N_MELS = 80
HOP_LENGTH = 160
CHUNK_LENGTH = 30
N_SAMPLES = CHUNK_LENGTH * SA... |
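The hard-coded audio hyperparameters above imply a bit of arithmetic worth spelling out (a sketch of the numbers; the assert mimics what an exact_div helper would presumably enforce):

```python
SAMPLE_RATE = 16000   # values copied from the row above
CHUNK_LENGTH = 30
HOP_LENGTH = 160

N_SAMPLES = CHUNK_LENGTH * SAMPLE_RATE  # 30 * 16000 = 480000 samples per chunk
N_FRAMES = N_SAMPLES // HOP_LENGTH      # 480000 / 160 = 3000 mel frames per chunk
assert N_SAMPLES % HOP_LENGTH == 0      # the divisibility an exact_div helper would check
print(N_SAMPLES, N_FRAMES)              # 480000 3000
```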
import logging
import tqdm
class LoggingHandler(logging.Handler):
def __init__(self, level=logging.NOTSET) -> None:
super().__init__(level)
def emit(self, record) -> None:
try:
msg = self.format(record)
tqdm.tqdm.write(msg)
self.flush()
except (Key... | import logging
import tqdm
class LoggingHandler(logging.Handler):
def __init__(self, level=logging.NOTSET):
super().__init__(level)
def emit(self, record):
try:
msg = self.format(record)
tqdm.tqdm.write(msg)
self.flush()
except (KeyboardInterrupt, ... |
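One way to wire up the tqdm-aware LoggingHandler above (a usage sketch; sentence_transformers does export LoggingHandler, as a later row in this table shows):

```python
import logging

import tqdm
from sentence_transformers import LoggingHandler

# Route log records through tqdm.write so they don't mangle a progress bar.
logging.basicConfig(format="%(message)s", level=logging.INFO, handlers=[LoggingHandler()])
for i in tqdm.tqdm(range(3)):
    logging.info("step %d", i)
```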
"""
This example demonstrates the setup for Question-Answer-Retrieval.
You can input a query or a question. The script then uses semantic search
to find relevant passages in Simple English Wikipedia (as it is smaller and fits better in RAM).
As model, we use: nq-distilbert-base-v1
It was trained on the Natural Ques... | """
This example demonstrates the setup for Question-Answer-Retrieval.
You can input a query or a question. The script then uses semantic search
to find relevant passages in Simple English Wikipedia (as it is smaller and fits better in RAM).
As model, we use: nq-distilbert-base-v1
It was trained on the Natural Ques... |
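A minimal sketch of the retrieval setup that docstring describes (the model name comes from the text above; the passages here are stand-ins):

```python
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer("nq-distilbert-base-v1")
passages = ["Brixen is a town in South Tyrol.", "The moon orbits the earth."]
corpus_emb = model.encode(passages, convert_to_tensor=True)
query_emb = model.encode("Where is Brixen?", convert_to_tensor=True)
# Returns the best-matching passage index with its cosine score.
print(util.semantic_search(query_emb, corpus_emb, top_k=1))
```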
import json
from json import JSONDecodeError
from typing import Union
from langchain_core.agents import AgentAction, AgentActionMessageLog, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain_core.messages import (
AIMessage,
BaseMessage,
)
from langchain_core.outputs import ... | import json
from json import JSONDecodeError
from typing import Union
from langchain_core.agents import AgentAction, AgentActionMessageLog, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain_core.messages import (
AIMessage,
BaseMessage,
)
from langchain_core.outputs import ... |
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]
def pytest_collection_modifyitems(config, items):
# Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
for ite... | import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.s3", "tests.fixtures.fsspec"]
def pytest_collection_modifyitems(config, items):
# Mark tests as "unit" by default if not marked as "integration" (or already marked a... |
import numpy as np
import pytest
from pydantic import parse_obj_as
from docarray.computation.numpy_backend import NumpyCompBackend
from docarray.typing import NdArray
def test_to_device():
with pytest.raises(NotImplementedError):
NumpyCompBackend.to_device(np.random.rand(10, 3), 'meta')
@pytest.mark.pa... | import numpy as np
import pytest
from docarray.computation.numpy_backend import NumpyCompBackend
def test_to_device():
with pytest.raises(NotImplementedError):
NumpyCompBackend.to_device(np.random.rand(10, 3), 'meta')
@pytest.mark.parametrize(
'array,result',
[
(np.zeros((5)), 1),
... |
"""Simple reader that turns an iterable of strings into a list of Documents."""
from typing import List
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
class StringIterableReader(BasePydanticReader):
"""
String Iterable Reader.
Gets a list of doc... | """Simple reader that turns an iterable of strings into a list of Documents."""
from typing import List
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
class StringIterableReader(BasePydanticReader):
"""String Iterable Reader.
Gets a list of document... |
from typing import Union
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.torch_tensor import TorchTensor
Tensor = Union[NdArray, TorchTensor]
| from typing import TYPE_CHECKING, Any, Dict, List, Tuple, Type, TypeVar, Union, cast
import numpy as np
from docarray.typing.abstract_type import AbstractType
if TYPE_CHECKING:
from pydantic.fields import ModelField
from pydantic import BaseConfig
from docarray.proto import NdArrayProto, NodeProto
T = Type... |
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import logging
import os
import os.path as osp
from mmengine.config import Config, DictAction
from mmengine.logging import print_log
from mmengine.registry import RUNNERS
from mmengine.runner import Runner
from mmdet.utils import register_all_modules
d... | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import logging
import os
import os.path as osp
from mmengine.config import Config, DictAction
from mmengine.logging import print_log
from mmengine.registry import RUNNERS
from mmengine.runner import Runner
from mmdet.utils import register_all_modules
d... |
from keras._tf_keras import keras
| from keras.api._tf_keras import keras
|
import os
import urllib
import pytest
from pydantic import parse_obj_as, schema_json_of
from docarray.document.io.json import orjson_dumps
from docarray.typing import TextUrl
REMOTE_TXT = 'https://de.wikipedia.org/wiki/Brixen'
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
LOCAL_TXT = os.path.join(CUR_DIR, '..... | import os
import urllib
import pytest
from pydantic import parse_obj_as, schema_json_of
from docarray.document.io.json import orjson_dumps
from docarray.typing import TextUrl
REMOTE_TXT = 'https://de.wikipedia.org/wiki/Brixen'
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
LOCAL_TXT = os.path.join(CUR_DIR, '..... |
# dataset settings
dataset_type = 'VOCDataset'
data_root = 'data/VOCdevkit/'
# Example to use different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer from prefix (does not support LMDB and Memcache yet)
# data_root = 's3://openmmlab/datasets/detection/segmentation/VOCde... | # dataset settings
dataset_type = 'VOCDataset'
data_root = 'data/VOCdevkit/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
... |
# type: ignore
"""Script to generate migrations for the migration script."""
import json
import os
import pkgutil
import click
from langchain_cli.namespaces.migrate.generate.generic import (
generate_simplified_migrations,
)
from langchain_cli.namespaces.migrate.generate.grit import (
dump_migrations_as_grit... | # type: ignore
"""Script to generate migrations for the migration script."""
import json
import os
import pkgutil
import click
from langchain_cli.namespaces.migrate.generate.generic import (
generate_simplified_migrations,
)
from langchain_cli.namespaces.migrate.generate.grit import (
dump_migrations_as_grit... |
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or ag... | # coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or ag... |
import multiprocessing
from concurrent.futures import ThreadPoolExecutor
import pytest
import xgboost as xgb
@pytest.mark.parametrize("verbosity_level", [0, 1, 2, 3])
def test_global_config_verbosity(verbosity_level):
def get_current_verbosity():
return xgb.get_config()["verbosity"]
old_verbosity =... | import multiprocessing
from concurrent.futures import ThreadPoolExecutor
import pytest
import xgboost as xgb
@pytest.mark.parametrize("verbosity_level", [0, 1, 2, 3])
def test_global_config_verbosity(verbosity_level):
def get_current_verbosity():
return xgb.get_config()["verbosity"]
old_verbosity =... |
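The test above exercises xgboost's global config API; the basic read-set-restore pattern looks like this (a sketch using the same calls the test shows):

```python
import xgboost as xgb

old = xgb.get_config()["verbosity"]  # read the current global verbosity
xgb.set_config(verbosity=2)          # set it globally
assert xgb.get_config()["verbosity"] == 2
xgb.set_config(verbosity=old)        # restore the previous value
```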
_base_ = 'mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_270k_coco.py'
# lr steps at [0.9, 0.95, 0.975] of the maximum iterations
lr_config = dict(
warmup_iters=500, warmup_ratio=0.067, step=[81000, 85500, 87750])
# 90k iterations with batch_size 64 is roughly equivalent to 48 epochs
runner = dict(type='IterB... | _base_ = 'mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_270k_coco.py'
# lr steps at [0.9, 0.95, 0.975] of the maximum iterations
lr_config = dict(
warmup_iters=500, warmup_ratio=0.067, step=[81000, 85500, 87750])
# 90k iterations with batch_size 64 is roughly equivalent to 48 epochs
runner = dict(type='IterBasedR... |
from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import BaseModel
from langchain_community.utilities.polygon import PolygonAPIWrapper
class Inputs(BaseModel):
"""Inputs for Polygon's Ticker News API"""
q... | from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import BaseModel
from langchain_community.utilities.polygon import PolygonAPIWrapper
class Inputs(BaseModel):
"""Inputs for Polygon's Ticker News API"""
q... |
"""Chroma Reader."""
from typing import Any, List, Optional, Union
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class ChromaReader(BaseReader):
"""
Chroma reader.
Retrieve documents from existing persisted Chroma collections.
Args:
colle... | """Chroma Reader."""
from typing import Any, List, Optional, Union
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class ChromaReader(BaseReader):
"""Chroma reader.
Retrieve documents from existing persisted Chroma collections.
Args:
collection... |
from .pdf_segmenter import PDFSegmenter
| from .pdf_segmenter import PDFSegmenter |
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.audio.abstract_audio_tensor import AbstractAudioTensor
from docarray.typing.tensor.torch_tensor import TorchTensor, metaTorchAndNode
@_register_proto(proto_type_name='audio_torch_tensor')
class AudioTorchTensor(AbstractAudioTensor,... | from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.audio.abstract_audio_tensor import AbstractAudioTensor
from docarray.typing.tensor.torch_tensor import TorchTensor, metaTorchAndNode
@_register_proto(proto_type_name='audio_torch_tensor')
class AudioTorchTensor(AbstractAudioTensor,... |
# ruff: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICE... | # ruff: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICE... |
"""Tool for the Google search API."""
from typing import Optional, Type
from langchain_core._api.deprecation import deprecated
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
from langchain_community.utilities.google_place... | """Tool for the Google search API."""
from typing import Optional, Type
from langchain_core._api.deprecation import deprecated
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
from langchain_community.utilities.google_place... |
from typing import Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_document import BaseDocument
from docarray.typing import AnyEmbedding, AudioUrl
from docarray.typing.bytes.audio_bytes import AudioBytes
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.t... | from typing import Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_document import BaseDocument
from docarray.typing import AnyEmbedding, AudioUrl
from docarray.typing.bytes.audio_bytes import AudioBytes
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.t... |
import os
from pathlib import Path
from torchaudio.datasets import gtzan
from torchaudio_unittest.common_utils import get_whitenoise, normalize_wav, save_wav, TempDirMixin, TorchaudioTestCase
def get_mock_dataset(root_dir):
"""
root_dir: directory to the mocked dataset
"""
mocked_samples = []
moc... | import os
from pathlib import Path
from torchaudio.datasets import gtzan
from torchaudio_unittest.common_utils import get_whitenoise, normalize_wav, save_wav, TempDirMixin, TorchaudioTestCase
def get_mock_dataset(root_dir):
"""
root_dir: directory to the mocked dataset
"""
mocked_samples = []
moc... |
"""Build configuration"""
import dataclasses
from typing import Any, Dict, List, Optional
@dataclasses.dataclass
class BuildConfiguration: # pylint: disable=R0902
"""Configurations use when building libxgboost"""
# Whether to hide C++ symbols in libxgboost.so
hide_cxx_symbols: bool = True
# Whether ... | """Build configuration"""
import dataclasses
from typing import Any, Dict, List, Optional
@dataclasses.dataclass
class BuildConfiguration: # pylint: disable=R0902
"""Configurations use when building libxgboost"""
# Whether to hide C++ symbols in libxgboost.so
hide_cxx_symbols: bool = True
# Whether ... |
from typing import Any, Union
from torchvision import tv_tensors
from torchvision.transforms.v2 import functional as F, Transform
class ConvertBoundingBoxFormat(Transform):
"""Convert bounding box coordinates to the given ``format``, eg from "CXCYWH" to "XYXY".
Args:
format (str or tv_tensors.Boundi... | from typing import Any, Dict, Union
from torchvision import tv_tensors
from torchvision.transforms.v2 import functional as F, Transform
class ConvertBoundingBoxFormat(Transform):
"""Convert bounding box coordinates to the given ``format``, eg from "CXCYWH" to "XYXY".
Args:
format (str or tv_tensors.... |
from backend.data.credit import get_user_credit_model
from backend.data.execution import (
ExecutionResult,
NodeExecutionEntry,
RedisExecutionEventBus,
create_graph_execution,
get_execution_results,
get_incomplete_executions,
get_latest_execution,
update_execution_status,
update_grap... | from backend.data.credit import get_user_credit_model
from backend.data.execution import (
ExecutionResult,
NodeExecutionEntry,
RedisExecutionEventBus,
create_graph_execution,
get_execution_results,
get_incomplete_executions,
get_latest_execution,
update_execution_status,
update_grap... |
"""
This script contains an example of how to perform semantic search with OpenSearch.
You need OpenSearch up and running locally:
https://docs.opensearch.org/docs/latest/getting-started/quickstart/
Further, you need the Python OpenSearch Client installed: https://docs.opensearch.org/docs/latest/clients/python-low-level... | """
This script contains an example of how to perform semantic search with OpenSearch.
You need OpenSearch up and running locally:
https://docs.opensearch.org/docs/latest/getting-started/quickstart/
Further, you need the Python OpenSearch Client installed: https://docs.opensearch.org/docs/latest/clients/python-low-level... |
from abc import abstractmethod
from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Sequence, Type, Union
from docarray.document import BaseDocument
if TYPE_CHECKING:
from docarray.typing import NdArray, TorchTensor
class AbstractDocumentArray(Sequence):
document_type: Type[BaseDocument]
_... | from abc import abstractmethod
from typing import Iterable, Type
from docarray.document import BaseDocument
class AbstractDocumentArray(Iterable):
document_type: Type[BaseDocument]
@abstractmethod
def __init__(self, docs: Iterable[BaseDocument]):
...
@abstractmethod
def __class_getitem... |
# Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .logger import get_root_logger
from .misc import find_latest_checkpoint
__all__ = [
'get_root_logger',
'collect_env',
'find_latest_checkpoint',
]
| # Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .logger import get_root_logger
__all__ = ['get_root_logger', 'collect_env']
|
import os
import sys
import torch
from ._internally_replaced_utils import _get_extension_path
_HAS_OPS = False
def _has_ops():
return False
try:
# On Windows Python-3.8.x has `os.add_dll_directory` call,
# which is called to configure dll search path.
# To find cuda related dlls we need to make ... | import os
import sys
import torch
from ._internally_replaced_utils import _get_extension_path
_HAS_OPS = False
def _has_ops():
return False
try:
# On Windows Python-3.8.x has `os.add_dll_directory` call,
# which is called to configure dll search path.
# To find cuda related dlls we need to make ... |
import numpy as np
import torch
from docarray import BaseDocument
from docarray.document import AnyDocument
from docarray.typing import (
AnyUrl,
Embedding,
ImageUrl,
Mesh3DUrl,
NdArray,
PointCloud3DUrl,
TextUrl,
TorchTensor,
)
def test_proto_all_types():
class Mymmdoc(BaseDocumen... | import numpy as np
import torch
from docarray import BaseDocument
from docarray.document import AnyDocument
from docarray.typing import (
AnyUrl,
Embedding,
ImageUrl,
Mesh3DUrl,
NdArray,
PointCloud3DUrl,
TextUrl,
TorchTensor,
)
def test_proto_all_types():
class Mymmdoc(BaseDocumen... |
import matplotlib.pyplot as plt
import torch
from torchvision.utils import draw_bounding_boxes, draw_segmentation_masks
from torchvision import datapoints
from torchvision.transforms.v2 import functional as F
def plot(imgs, row_title=None, **imshow_kwargs):
if not isinstance(imgs[0], list):
# Make a 2d gr... | import matplotlib.pyplot as plt
import torch
from torchvision.utils import draw_bounding_boxes, draw_segmentation_masks
from torchvision import datapoints
from torchvision.transforms.v2 import functional as F
def plot(imgs):
if not isinstance(imgs[0], list):
# Make a 2d grid even if there's just 1 row
... |
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseEmbeddingSimilarityEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser... | import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseEmbeddingSimilarityEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser... |
# Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .hub import load_url
from .manager import ManagerMeta, ManagerMixin
from .misc import (check_prerequisites, concat_list, deprecated_api_warning,
has_batch_norm, has_method, import_modules_from_strings,
... | # Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .hub import load_url
from .manager import ManagerMeta, ManagerMixin
from .misc import (check_prerequisites, concat_list, deprecated_api_warning,
has_batch_norm, has_method, import_modules_from_strings,
... |
from typing import Any, Optional, Sequence
from llama_index.core.evaluation.base import BaseEvaluator, EvaluationResult
from llama_index.core.prompts.mixin import PromptDictType, PromptMixinType
from tonic_validate.metrics.answer_consistency_metric import (
AnswerConsistencyMetric,
)
from tonic_validate.services.... | from typing import Any, Optional, Sequence
from llama_index.core.evaluation.base import BaseEvaluator, EvaluationResult
from llama_index.core.prompts.mixin import PromptDictType, PromptMixinType
from tonic_validate.metrics.answer_consistency_metric import (
AnswerConsistencyMetric,
)
from tonic_validate.services.... |
from __future__ import annotations
import math
from pathlib import Path
import numpy as np
import pytest
from tokenizers import Tokenizer
from sentence_transformers import SentenceTransformer
from sentence_transformers.models.StaticEmbedding import StaticEmbedding
try:
import model2vec
except ImportError:
m... | from __future__ import annotations
import math
from pathlib import Path
import numpy as np
import pytest
from packaging.version import Version, parse
from tokenizers import Tokenizer
from sentence_transformers import SentenceTransformer
from sentence_transformers.models.StaticEmbedding import StaticEmbedding
try:
... |
from backend.data.block import BlockOutput, BlockSchema
from backend.data.model import APIKeyCredentials, SchemaField
from ._api import (
TEST_CREDENTIALS,
TEST_CREDENTIALS_INPUT,
Slant3DCredentialsField,
Slant3DCredentialsInput,
)
from .base import Slant3DBlockBase
class Slant3DSlicerBlock(Slant3DBl... | from backend.data.block import BlockOutput, BlockSchema
from backend.data.model import APIKeyCredentials, SchemaField
from ._api import (
TEST_CREDENTIALS,
TEST_CREDENTIALS_INPUT,
Slant3DCredentialsField,
Slant3DCredentialsInput,
)
from .base import Slant3DBlockBase
class Slant3DSlicerBlock(Slant3DBl... |
from contextlib import contextmanager
from functools import partial
from unittest.mock import patch
import torch
from parameterized import parameterized
from torchaudio._internal.module_utils import is_module_available
from torchaudio_unittest.common_utils import skipIfNoModule, TorchaudioTestCase
from .utils import ... | from contextlib import contextmanager
from functools import partial
from unittest.mock import patch
import torch
from parameterized import parameterized
from torchaudio._internal.module_utils import is_module_available
from torchaudio_unittest.common_utils import skipIfNoModule, TorchaudioTestCase
from .utils import ... |
_base_ = './sparse-rcnn_r50_fpn_300-proposals_crop-ms-480-800-3x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
| _base_ = './sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
import base64
from collections import defaultdict
from typing import TYPE_CHECKING, Type
import numpy as np
if TYPE_CHECKING:
from pydantic import BaseModel
from docarray.typing import T
from docarray.document.pydantic_model import PydanticDocument
class PydanticMixin:
"""Provide helper functions to... | import base64
from collections import defaultdict
from typing import TYPE_CHECKING, Type
import numpy as np
if TYPE_CHECKING:
from pydantic import BaseModel
from ...typing import T
from ..pydantic_model import PydanticDocument
class PydanticMixin:
"""Provide helper functions to convert to/from a Pyd... |
import os
import pytest
from llama_index.llms.asi import ASI
@pytest.mark.skipif("ASI_API_KEY" not in os.environ, reason="No ASI API key")
def test_completion():
# Test basic completion
asi = ASI(model="asi1-mini", temperature=0, max_tokens=10)
resp = asi.complete("hello")
assert resp.text.strip() !=... | import os
import pytest
from llama_index.llms.asi import ASI
@pytest.mark.skipif("ASI_API_KEY" not in os.environ, reason="No ASI API key")
def test_completion():
# Test basic completion
asi = ASI(model="asi1-mini", temperature=0, max_tokens=10)
resp = asi.complete("hello")
assert resp.text.strip() !=... |
"""Chain-of-Abstraction Output Parser."""
import asyncio
import json
import networkx as nx
import re
from collections import defaultdict
from typing import Dict, Tuple
from llama_index.core.tools import AsyncBaseTool, ToolOutput
from llama_index.core.types import BaseOutputParser
class ChainOfAbstractionParser(Base... | """Chain-of-Abstraction Output Parser."""
import asyncio
import json
import networkx as nx
import re
from collections import defaultdict
from typing import Dict, Tuple
from llama_index.core.tools import AsyncBaseTool, ToolOutput
from llama_index.core.types import BaseOutputParser
class ChainOfAbstractionParser(Base... |
from typing import Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_doc import BaseDoc
from docarray.typing import AnyTensor
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils.misc import is_tf_available, is_torch_available
torch_available = is_torch_availa... | from typing import Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_document import BaseDocument
from docarray.typing import AnyTensor
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils.misc import is_tf_available, is_torch_available
torch_available = is_to... |
"""Smart PDF Loader."""
from typing import Any, Dict, List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class SmartPDFLoader(BaseReader):
"""
SmartPDFLoader uses nested layout information such as sections, paragraphs, lists and tables to smartly... | """Smart PDF Loader."""
from typing import Any, Dict, List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class SmartPDFLoader(BaseReader):
"""
SmartPDFLoader uses nested layout information such as sections, paragraphs, lists and tables to smartly ... |
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.registry import MODEL_WRAPPERS
def is_model_wrapper(model):
"""Check if a module is a model wrapper.
The following 4 models in MMEngine (and their subclasses) are regarded as
model wrappers: DataParallel, DistributedDataParallel,
MMDataPara... | # Copyright (c) OpenMMLab. All rights reserved.
from mmengine.registry import MODEL_WRAPPERS
def is_model_wrapper(model):
"""Check if a module is a model wrapper.
The following 4 models in MMEngine (and their subclasses) are regarded as
model wrappers: DataParallel, DistributedDataParallel,
MMDataPara... |
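A hard-coded sketch of the wrapper check that is_model_wrapper's docstring describes (isinstance against the two torch wrappers, rather than walking a registry like MODEL_WRAPPERS):

```python
from torch import nn

def is_wrapped(model) -> bool:
    # True for modules wrapped in (Distributed)DataParallel.
    return isinstance(model, (nn.DataParallel, nn.parallel.DistributedDataParallel))

print(is_wrapped(nn.DataParallel(nn.Linear(2, 2))))  # True
print(is_wrapped(nn.Linear(2, 2)))                   # False
```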
import warnings
from abc import ABC
from typing import Any, BinaryIO, Dict, TypeVar, Union
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils._internal.misc import is_notebook
T = TypeVar('T', bound='AbstractAudioTensor')
MAX_INT_16 = 2**15
class AbstractAudioTensor(AbstractTenso... | import warnings
from abc import ABC
from typing import Any, BinaryIO, Dict, TypeVar, Union
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils.misc import is_notebook
T = TypeVar('T', bound='AbstractAudioTensor')
MAX_INT_16 = 2**15
class AbstractAudioTensor(AbstractTensor, ABC):
... |
__version__ = '0.13.20'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
| __version__ = '0.13.19'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
__version__ = "3.0.0.dev0"
__MODEL_HUB_ORGANIZATION__ = "sentence-transformers"
import importlib
import os
from sentence_transformers.cross_encoder.CrossEncoder import CrossEncoder
from sentence_transformers.datasets import ParallelSentencesDataset, SentencesDataset
from sentence_transformers.LoggingHandler import Lo... | __version__ = "3.0.0.dev0"
__MODEL_HUB_ORGANIZATION__ = "sentence-transformers"
import importlib
import os
from .datasets import SentencesDataset, ParallelSentencesDataset
from .LoggingHandler import LoggingHandler
from .SentenceTransformer import SentenceTransformer
from .similarity_functions import SimilarityFuncti... |
# Copyright (c) OpenMMLab. All rights reserved.
import math
import torch
from torch.utils.data import DistributedSampler as _DistributedSampler
class DistributedSampler(_DistributedSampler):
def __init__(self,
dataset,
num_replicas=None,
rank=None,
... | import math
import torch
from torch.utils.data import DistributedSampler as _DistributedSampler
class DistributedSampler(_DistributedSampler):
def __init__(self,
dataset,
num_replicas=None,
rank=None,
shuffle=True,
seed=0):
... |
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet.models.roi_heads import StandardRoIHead # noqa
from mmdet.registry import MODELS
from mmdet.testing import demo_mm_inputs, demo_mm_proposals, get_roi_head_cfg
... | # Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet.models.roi_heads import StandardRoIHead # noqa
from mmdet.registry import MODELS
from mmdet.testing import demo_mm_inputs, demo_mm_proposals, get_roi_head_cfg
... |
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_compressors.flashrank_rerank import (
FlashrankRerank,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation w... | from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_compressors.flashrank_rerank import (
FlashrankRerank,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation w... |
_base_ = './fcos_r50_caffe_fpn_gn-head_1x_coco.py'
# model settings
model = dict(
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32),
backbone=dict(
type='ResNeXt'... | _base_ = './fcos_r50_caffe_fpn_gn-head_1x_coco.py'
# model settings
model = dict(
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32),
backbone=dict(
type='ResNeXt'... |
"""
This script showcases a recommended approach to perform semantic search using quantized embeddings with FAISS and usearch.
In particular, it uses binary search with int8 rescoring. The binary search is highly efficient, and its index can be kept
in memory even for massive datasets: it takes (num_dimensions * num_do... | """
This script showcases a recommended approach to perform semantic search using quantized embeddings with FAISS and usearch.
In particular, it uses binary search with int8 rescoring. The binary search is highly efficient, and its index can be kept
in memory even for massive datasets: it takes (num_dimensions * num_do... |
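The memory claim in that docstring is easy to verify: binary embeddings store one bit per dimension, so a corpus costs dims * docs / 8 bytes. A back-of-the-envelope sketch (plain numpy, not the faiss/usearch APIs):

```python
import numpy as np

num_docs, dims = 1_000_000, 1024
binary_bytes = num_docs * dims // 8   # 128,000,000 bytes (~128 MB) for the binary index
float32_bytes = num_docs * dims * 4   # ~4.1 GB for the same corpus in float32

# Binary quantization itself just keeps the sign of each dimension.
emb = np.random.randn(2, 8).astype(np.float32)
packed = np.packbits((emb > 0).astype(np.uint8), axis=1)  # 8 dims -> 1 byte
print(binary_bytes, float32_bytes, packed.shape)          # 128000000 4096000000 (2, 1)
```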
from abc import abstractmethod
from typing import (
TYPE_CHECKING,
TypeVar,
Sequence,
List,
Dict,
Optional,
)
from qdrant_client.http.models.models import Distance
from .... import Document, DocumentArray
from ....math import ndarray
from ....score import NamedScore
if TYPE_CHECKING:
impo... | from abc import abstractmethod
from typing import (
TYPE_CHECKING,
TypeVar,
Sequence,
List,
)
from qdrant_client.http.models.models import Distance
from .... import Document, DocumentArray
from ....math import ndarray
from ....score import NamedScore
if TYPE_CHECKING:
import tensorflow
import... |
from __future__ import annotations
from torch import Tensor, nn
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.util import fullname
class BinaryCrossEntropyLoss(nn.Module):
def __init__(
self,
model: CrossEncoder,
activation_fct: nn.Module = nn.Id... | from __future__ import annotations
from torch import Tensor, nn
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.util import fullname
class BinaryCrossEntropyLoss(nn.Module):
def __init__(
self,
model: CrossEncoder,
activation_fct: nn.Module = nn.Id... |
import os
import warnings
from modulefinder import Module
import torch
from torchvision import _meta_registrations, datasets, io, models, ops, transforms, utils
from .extension import _HAS_OPS
try:
from .version import __version__ # noqa: F401
except ImportError:
pass
# Check if torchvision is being impor... | import os
import warnings
from modulefinder import Module
import torch
from torchvision import datasets, io, models, ops, transforms, utils
from .extension import _HAS_OPS
try:
from .version import __version__ # noqa: F401
except ImportError:
pass
# Check if torchvision is being imported within the root f... |
from __future__ import annotations
import torch
from torch import nn
# TODO: SAVING LOADING with config.json
class SpladePooling(nn.Module):
"""SPLADE pooling layer that aggregates MLM logits using max or sum pooling.
This pooling layer takes MLM logits (shape: batch_size, seq_length, vocab_size)
and a... | from __future__ import annotations
import torch
from torch import nn
class SpladePooling(nn.Module):
"""SPLADE pooling layer that aggregates MLM logits using max or sum pooling.
This pooling layer takes MLM logits (shape: batch_size, seq_length, vocab_size)
and applies SPLADE transformation (ReLU + log)... |
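The SpladePooling docstring above pins down the math: apply log(1 + ReLU(x)) to the MLM logits, then max- or sum-pool over seq_length. A minimal sketch (not the sentence-transformers module):

```python
import torch

def splade_pool(mlm_logits: torch.Tensor, method: str = "max") -> torch.Tensor:
    scores = torch.log1p(torch.relu(mlm_logits))  # (batch, seq_len, vocab_size)
    if method == "max":
        return scores.max(dim=1).values           # (batch, vocab_size)
    return scores.sum(dim=1)

logits = torch.randn(2, 16, 30522)                # e.g. a BERT-sized vocabulary
print(splade_pool(logits).shape)                  # torch.Size([2, 30522])
```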
from pathlib import Path
from typing import Dict, Tuple, Union
import torchaudio
from torch import Tensor
from torch.utils.data import Dataset
from torchaudio._internal import download_url_to_file
from torchaudio.datasets.utils import _extract_zip
_URL = "https://datashare.ed.ac.uk/bitstream/handle/10283/3038/DR-VCT... | from pathlib import Path
from typing import Dict, Tuple, Union
import torchaudio
from torch import Tensor
from torch.hub import download_url_to_file
from torch.utils.data import Dataset
from torchaudio.datasets.utils import _extract_zip
_URL = "https://datashare.ed.ac.uk/bitstream/handle/10283/3038/DR-VCTK.zip"
_CHE... |
from datetime import datetime
import pytest
from prisma.models import CreditTransaction
from backend.blocks.llm import AITextGeneratorBlock
from backend.data.credit import BetaUserCredit
from backend.data.execution import NodeExecutionEntry
from backend.data.user import DEFAULT_USER_ID
from backend.integrations.crede... | from datetime import datetime
import pytest
from prisma.models import CreditTransaction
from backend.blocks.llm import AITextGeneratorBlock
from backend.data.credit import BetaUserCredit
from backend.data.user import DEFAULT_USER_ID
from backend.integrations.credentials_store import openai_credentials
from backend.ut... |
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.nn import average_pool
from keras.src.ops.nn import batch_normalization
from keras.src.ops.nn import binary_crossentropy
from keras.src.ops.nn import categorical_crossentropy
from... | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.nn import average_pool
from keras.src.ops.nn import batch_normalization
from keras.src.ops.nn import binary_crossentropy
from keras.src.ops.nn import categorical_crossentropy
from... |
import pathlib
import pytest
from mktestdocs import grab_code_blocks
from mktestdocs.__main__ import _executors, check_raw_string
file_to_skip = ['fastAPI', 'jina']
def check_raw_file_full(raw, lang="python", keyword_ignore=[]):
if lang not in _executors:
raise LookupError(
f"{lang} is not a... | import pathlib
import pytest
from mktestdocs import grab_code_blocks
from mktestdocs.__main__ import _executors, check_raw_string
file_to_skip = ['fastAPI', 'jina']
def check_raw_file_full(raw, lang="python", keyword_ignore=[]):
if lang not in _executors:
raise LookupError(
f"{lang} is not a... |
"""Copyright 2024, XGBoost contributors"""
import pytest
from distributed import Client, Scheduler, Worker
from distributed.utils_test import gen_cluster
from xgboost import testing as tm
from xgboost.testing.dask import check_external_memory, get_rabit_args
@pytest.mark.parametrize("is_qdm", [True, False])
@gen_cl... | """Copyright 2024, XGBoost contributors"""
import pytest
from distributed import Client, Scheduler, Worker
from distributed.utils_test import gen_cluster
from xgboost import testing as tm
from xgboost.testing.dask import check_external_memory, get_rabit_args
@pytest.mark.parametrize("is_qdm", [True, False])
@gen_cl... |
"""**Callback handlers** allow listening to events in LangChain.
**Class hierarchy:**
.. code-block::
BaseCallbackHandler --> <name>CallbackHandler # Example: AimCallbackHandler
"""
from importlib import import_module
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from langchain_core.callbacks.base im... | """**Callback handlers** allow listening to events in LangChain.
**Class hierarchy:**
.. code-block::
BaseCallbackHandler --> <name>CallbackHandler # Example: AimCallbackHandler
"""
from importlib import import_module
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from langchain_core.callbacks.base im... |
_base_ = './retinanet_r50-caffe_fpn_1x_coco.py'
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), (1333, 768),
(1333, 800)],
keep... | _base_ = './retinanet_r50-caffe_fpn_1x_coco.py'
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize',
scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), (1333, 768),
(1333, 800)],
keep_ratio... |
# Copyright (c) OpenMMLab. All rights reserved.
from .cityscapes_metric import CityScapesMetric
from .coco_metric import CocoMetric
from .coco_panoptic_metric import CocoPanopticMetric
__all__ = ['CityScapesMetric', 'CocoMetric', 'CocoPanopticMetric']
| # Copyright (c) OpenMMLab. All rights reserved.
from .coco_metric import CocoMetric
from .coco_panoptic_metric import CocoPanopticMetric
__all__ = ['CocoMetric', 'CocoPanopticMetric']
|
from .autoencoder_asym_kl import AsymmetricAutoencoderKL
from .autoencoder_dc import AutoencoderDC
from .autoencoder_kl import AutoencoderKL
from .autoencoder_kl_allegro import AutoencoderKLAllegro
from .autoencoder_kl_cogvideox import AutoencoderKLCogVideoX
from .autoencoder_kl_hunyuan_video import AutoencoderKLHunyua... | from .autoencoder_asym_kl import AsymmetricAutoencoderKL
from .autoencoder_dc import AutoencoderDC
from .autoencoder_kl import AutoencoderKL
from .autoencoder_kl_allegro import AutoencoderKLAllegro
from .autoencoder_kl_cogvideox import AutoencoderKLCogVideoX
from .autoencoder_kl_hunyuan_video import AutoencoderKLHunyua... |
#!/usr/bin/env python3
"""The demo script for testing the pre-trained Emformer RNNT pipelines.
Example:
python pipeline_demo.py --model-type librispeech --dataset-path ./datasets/librispeech
"""
import logging
import pathlib
from argparse import ArgumentParser, RawTextHelpFormatter
from dataclasses import dataclass
fr... | #!/usr/bin/env python3
"""The demo script for testing the pre-trained Emformer RNNT pipelines.
Example:
python pipeline_demo.py --model-type librispeech --dataset-path ./datasets/librispeech
"""
import logging
import pathlib
from argparse import ArgumentParser, RawTextHelpFormatter
from dataclasses import dataclass
fr... |
__copyright__ = 'Copyright (c) 2020-2021 Jina AI Limited. All rights reserved.'
__license__ = 'Apache-2.0'
import os
import subprocess
import librosa
import pytest
from jina import Document, DocumentArray, Flow
from ... import AudioCLIPEncoder
cur_dir = os.path.dirname(os.path.abspath(__file__))
def test_flow_fro... | __copyright__ = 'Copyright (c) 2020-2021 Jina AI Limited. All rights reserved.'
__license__ = 'Apache-2.0'
import os
import librosa
from jina import Flow, Document, DocumentArray
from ... import AudioCLIPEncoder
cur_dir = os.path.dirname(os.path.abspath(__file__))
def test_flow_from_yml():
doc = DocumentArray... |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings
from .base import GenericTensor, Pipeline, build_pipeline_init_args
@add_end_docstrings(
build_pipeline_init_args(has_tokenizer=True, supports_binary_output=False),
r"""
tokenize_kwargs (`dict`, *optional*):
... | from typing import Dict
from ..utils import add_end_docstrings
from .base import GenericTensor, Pipeline, build_pipeline_init_args
@add_end_docstrings(
build_pipeline_init_args(has_tokenizer=True, supports_binary_output=False),
r"""
tokenize_kwargs (`dict`, *optional*):
Additional dic... |
"""Configuration for unit tests."""
from collections.abc import Iterator, Sequence
from importlib import util
import pytest
from blockbuster import blockbuster_ctx
from pytest import Config, Function, Parser
@pytest.fixture(autouse=True)
def blockbuster() -> Iterator[None]:
with blockbuster_ctx("langchain") as ... | """Configuration for unit tests."""
from collections.abc import Iterator, Sequence
from importlib import util
import pytest
from blockbuster import blockbuster_ctx
from pytest import Config, Function, Parser
@pytest.fixture(autouse=True)
def blockbuster() -> Iterator[None]:
with blockbuster_ctx("langchain") as ... |
import pytest
from backend.util.service import (
AppService,
AppServiceClient,
endpoint_to_async,
expose,
get_service_client,
)
TEST_SERVICE_PORT = 8765
class ServiceTest(AppService):
def __init__(self):
super().__init__()
def cleanup(self):
pass
@classmethod
de... | import pytest
from backend.util.service import AppService, expose, get_service_client
TEST_SERVICE_PORT = 8765
class ServiceTest(AppService):
def __init__(self):
super().__init__()
def cleanup(self):
pass
@classmethod
def get_port(cls) -> int:
return TEST_SERVICE_PORT
... |
import numpy as np
from numpy.fft import __all__ as fft_all
from numpy.fft import fft2, ifft2, irfft2, rfft2
from .._internal import get_xp
from ..common import _fft
fft = get_xp(np)(_fft.fft)
ifft = get_xp(np)(_fft.ifft)
fftn = get_xp(np)(_fft.fftn)
ifftn = get_xp(np)(_fft.ifftn)
rfft = get_xp(np)(_fft.rfft)
irfft =... | from numpy.fft import * # noqa: F403
from numpy.fft import __all__ as fft_all
from ..common import _fft
from .._internal import get_xp
import numpy as np
fft = get_xp(np)(_fft.fft)
ifft = get_xp(np)(_fft.ifft)
fftn = get_xp(np)(_fft.fftn)
ifftn = get_xp(np)(_fft.ifftn)
rfft = get_xp(np)(_fft.rfft)
irfft = get_xp(np)... |
import copy
import importlib
import os
import sys
from keras.src import backend as backend_module
from keras.src.api_export import keras_export
from keras.src.backend.common import global_state
def in_tf_graph():
if global_state.get_global_attribute("in_tf_graph_scope", False):
return True
if "tenso... | import copy
import importlib
import os
import sys
from keras.src import backend as backend_module
from keras.src.api_export import keras_export
from keras.src.backend.common import global_state
def in_tf_graph():
if global_state.get_global_attribute("in_tf_graph_scope", False):
return True
if "tenso... |
import hashlib
from abc import ABC, abstractmethod
from functools import lru_cache
from typing import Any, Callable, Optional, Union
from typing_extensions import TypeAlias
import torch.fx.graph
class CustomGraphPass(ABC):
"""
Implement this interface for custom Graph passes:
1) The __call__() method co... | import hashlib
from abc import ABC, abstractmethod
from functools import lru_cache
from typing import Any, Callable, Optional, Union
from typing_extensions import TypeAlias
import torch.fx.graph
class CustomGraphPass(ABC):
"""
Implement this interface for custom Graph passes:
1) The __call__() method co... |
import os
from . import InputExample
class LabelSentenceReader:
"""Reads in a file that has at least two columns: a label and a sentence.
This reader can for example be used with the BatchHardTripletLoss.
Maps labels automatically to integers
"""
def __init__(self, folder, label_col_idx=0, sente... | from . import InputExample
import os
class LabelSentenceReader:
"""Reads in a file that has at least two columns: a label and a sentence.
This reader can for example be used with the BatchHardTripletLoss.
Maps labels automatically to integers"""
def __init__(self, folder, label_col_idx=0, sentence_co... |
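A toy version of the reader that docstring describes: one label and one sentence per line, with string labels mapped to consecutive integers on first sight (an illustration, not the sentence-transformers class):

```python
def read_labeled(lines):
    label2int, examples = {}, []
    for line in lines:
        label, sentence = line.rstrip("\n").split("\t", 1)
        examples.append((label2int.setdefault(label, len(label2int)), sentence))
    return examples

print(read_labeled(["pos\tgreat movie", "neg\tboring", "pos\tloved it"]))
# [(0, 'great movie'), (1, 'boring'), (0, 'loved it')]
```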
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Iterable, Optional
import torch
from jina import Executor, DocumentArray, requests
from jina_commons.batching import get_docs_batch_generator
from .audio_clip.model import AudioCLIP
class A... | __copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Iterable, Optional
import torch
from jina import Executor, DocumentArray, requests
from jina_commons.batching import get_docs_batch_generator
from audio_clip.model import AudioCLIP
class Au... |
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.utils.dl_utils import TORCH_VERSION
from mmengine.utils.version_utils import digit_version
from .distributed import MMDistributedDataParallel
from .seperate_distributed import MMSeparateDistributedDataParallel
from .utils import is_model_wrapper
__all__ = [... | # Copyright (c) OpenMMLab. All rights reserved.
from mmengine.utils.parrots_wrapper import TORCH_VERSION
from mmengine.utils.version_utils import digit_version
from .distributed import MMDistributedDataParallel
from .seperate_distributed import MMSeparateDistributedDataParallel
from .utils import is_model_wrapper
__al... |