| code (string, 161–233k chars) | apis (list, 1–24 items) | extract_api (string, 162–68.5k chars) |
|---|---|---|
from typing import Any, Dict, Optional
from llama_index.legacy.bridge.pydantic import Field
from llama_index.legacy.constants import (
DEFAULT_NUM_OUTPUTS,
DEFAULT_TEMPERATURE,
)
from llama_index.legacy.core.llms.types import LLMMetadata
from llama_index.legacy.llms.generic_utils import get_from_param_or_env
f... | [
"llama_index.legacy.llms.generic_utils.get_from_param_or_env",
"llama_index.legacy.bridge.pydantic.Field"
] | [((548, 659), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The Neutrino router to use. See https://docs.neutrinoapp.com/router for details."""'}), "(description=\n 'The Neutrino router to use. See https://docs.neutrinoapp.com/router for details.'\n )\n", (553, 659), False, 'from l... |
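The extract above records a `Field(description=...)` call. A minimal sketch of that pattern, assuming the legacy pydantic bridge re-exports `BaseModel` and using a hypothetical wrapper class:

```python
from llama_index.legacy.bridge.pydantic import BaseModel, Field

class NeutrinoSettings(BaseModel):  # hypothetical class for illustration
    # The description is surfaced in generated schemas and documentation.
    router: str = Field(
        description="The Neutrino router to use. See https://docs.neutrinoapp.com/router for details."
    )

print(NeutrinoSettings(router="default").router)
```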
"""Tree-based index."""
from enum import Enum
from typing import Any, Dict, Optional, Sequence, Union
from llama_index.core.base.base_retriever import BaseRetriever
from llama_index.core.base.embeddings.base import BaseEmbedding
# from llama_index.core.data_structs.data_structs import IndexGraph
from llama_index.cor... | [
"llama_index.core.indices.tree.select_leaf_embedding_retriever.TreeSelectLeafEmbeddingRetriever",
"llama_index.core.settings.embed_model_from_settings_or_context",
"llama_index.core.indices.tree.inserter.TreeIndexInserter",
"llama_index.core.settings.llm_from_settings_or_context",
"llama_index.core.indices.... | [((5992, 6202), 'llama_index.core.indices.common_tree.base.GPTTreeIndexBuilder', 'GPTTreeIndexBuilder', (['self.num_children', 'self.summary_template'], {'service_context': 'self.service_context', 'llm': 'self._llm', 'use_async': 'self._use_async', 'show_progress': 'self._show_progress', 'docstore': 'self._docstore'}),... |
"""Tree-based index."""
from enum import Enum
from typing import Any, Dict, Optional, Sequence, Union
from llama_index.core.base.base_retriever import BaseRetriever
from llama_index.core.base.embeddings.base import BaseEmbedding
# from llama_index.core.data_structs.data_structs import IndexGraph
from llama_index.cor... | [
"llama_index.core.indices.tree.select_leaf_embedding_retriever.TreeSelectLeafEmbeddingRetriever",
"llama_index.core.settings.embed_model_from_settings_or_context",
"llama_index.core.indices.tree.inserter.TreeIndexInserter",
"llama_index.core.settings.llm_from_settings_or_context",
"llama_index.core.indices.... | [((5992, 6202), 'llama_index.core.indices.common_tree.base.GPTTreeIndexBuilder', 'GPTTreeIndexBuilder', (['self.num_children', 'self.summary_template'], {'service_context': 'self.service_context', 'llm': 'self._llm', 'use_async': 'self._use_async', 'show_progress': 'self._show_progress', 'docstore': 'self._docstore'}),... |
"""Tree-based index."""
from enum import Enum
from typing import Any, Dict, Optional, Sequence, Union
from llama_index.core.base.base_retriever import BaseRetriever
from llama_index.core.base.embeddings.base import BaseEmbedding
# from llama_index.core.data_structs.data_structs import IndexGraph
from llama_index.cor... | [
"llama_index.core.indices.tree.select_leaf_embedding_retriever.TreeSelectLeafEmbeddingRetriever",
"llama_index.core.settings.embed_model_from_settings_or_context",
"llama_index.core.indices.tree.inserter.TreeIndexInserter",
"llama_index.core.settings.llm_from_settings_or_context",
"llama_index.core.indices.... | [((5992, 6202), 'llama_index.core.indices.common_tree.base.GPTTreeIndexBuilder', 'GPTTreeIndexBuilder', (['self.num_children', 'self.summary_template'], {'service_context': 'self.service_context', 'llm': 'self._llm', 'use_async': 'self._use_async', 'show_progress': 'self._show_progress', 'docstore': 'self._docstore'}),... |
from typing import Any, Callable, Dict, Optional, Sequence
from llama_index.legacy.bridge.pydantic import Field, PrivateAttr
from llama_index.legacy.callbacks import CallbackManager
from llama_index.legacy.core.llms.types import (
ChatMessage,
ChatResponse,
ChatResponseAsyncGen,
ChatResponseGen,
Co... | [
"llama_index.legacy.llms.base.llm_chat_callback",
"llama_index.legacy.core.llms.types.ChatMessage",
"llama_index.legacy.llms.base.llm_completion_callback",
"llama_index.legacy.bridge.pydantic.PrivateAttr",
"llama_index.legacy.core.llms.types.LLMMetadata",
"llama_index.legacy.bridge.pydantic.Field",
"lla... | [((762, 827), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': 'f"""Full URL of the model. e.g. `{EXAMPLE_URL}`"""'}), "(description=f'Full URL of the model. e.g. `{EXAMPLE_URL}`')\n", (767, 827), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((880, 918), 'llama... |
"""PII postprocessor."""
import json
from copy import deepcopy
from typing import Callable, Dict, List, Optional, Tuple
from llama_index.core.llms.llm import LLM
from llama_index.core.postprocessor.types import BaseNodePostprocessor
from llama_index.core.prompts.base import PromptTemplate
from llama_index.core.schema ... | [
"llama_index.core.prompts.base.PromptTemplate",
"llama_index.core.schema.NodeWithScore"
] | [((2092, 2125), 'llama_index.core.prompts.base.PromptTemplate', 'PromptTemplate', (['self.pii_str_tmpl'], {}), '(self.pii_str_tmpl)\n', (2106, 2125), False, 'from llama_index.core.prompts.base import PromptTemplate\n'), ((2560, 2587), 'json.loads', 'json.loads', (['json_str_output'], {}), '(json_str_output)\n', (2570, ... |
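The extracted calls show the postprocessor's round trip: format a `PromptTemplate`, then `json.loads` the model's answer. A sketch with a hypothetical template standing in for `pii_str_tmpl`:

```python
import json
from llama_index.core.prompts.base import PromptTemplate

pii_tmpl = PromptTemplate(
    "Mask personal data in the text and return JSON mapping each mask "
    "to the original value.\nText: {text}"
)
prompt = pii_tmpl.format(text="Call Jane at 555-0100.")
mapping = json.loads('{"[PHONE]": "555-0100"}')  # placeholder for the LLM's reply
```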
"""PII postprocessor."""
import json
from copy import deepcopy
from typing import Callable, Dict, List, Optional, Tuple
from llama_index.core.llms.llm import LLM
from llama_index.core.postprocessor.types import BaseNodePostprocessor
from llama_index.core.prompts.base import PromptTemplate
from llama_index.core.schema ... | [
"llama_index.core.prompts.base.PromptTemplate",
"llama_index.core.schema.NodeWithScore"
] | [((2092, 2125), 'llama_index.core.prompts.base.PromptTemplate', 'PromptTemplate', (['self.pii_str_tmpl'], {}), '(self.pii_str_tmpl)\n', (2106, 2125), False, 'from llama_index.core.prompts.base import PromptTemplate\n'), ((2560, 2587), 'json.loads', 'json.loads', (['json_str_output'], {}), '(json_str_output)\n', (2570, ... |
"""PII postprocessor."""
import json
from copy import deepcopy
from typing import Callable, Dict, List, Optional, Tuple
from llama_index.core.llms.llm import LLM
from llama_index.core.postprocessor.types import BaseNodePostprocessor
from llama_index.core.prompts.base import PromptTemplate
from llama_index.core.schema ... | [
"llama_index.core.prompts.base.PromptTemplate",
"llama_index.core.schema.NodeWithScore"
] | [((2092, 2125), 'llama_index.core.prompts.base.PromptTemplate', 'PromptTemplate', (['self.pii_str_tmpl'], {}), '(self.pii_str_tmpl)\n', (2106, 2125), False, 'from llama_index.core.prompts.base import PromptTemplate\n'), ((2560, 2587), 'json.loads', 'json.loads', (['json_str_output'], {}), '(json_str_output)\n', (2570, ... |
from typing import Any, Callable, Dict, Optional, Sequence
from llama_index.legacy.callbacks import CallbackManager
from llama_index.legacy.constants import DEFAULT_NUM_OUTPUTS, DEFAULT_TEMPERATURE
from llama_index.legacy.core.llms.types import ChatMessage, LLMMetadata
from llama_index.legacy.llms.everlyai_utils impor... | [
"llama_index.legacy.llms.generic_utils.get_from_param_or_env",
"llama_index.legacy.callbacks.CallbackManager",
"llama_index.legacy.llms.everlyai_utils.everlyai_modelname_to_contextsize"
] | [((1525, 1586), 'llama_index.legacy.llms.generic_utils.get_from_param_or_env', 'get_from_param_or_env', (['"""api_key"""', 'api_key', '"""EverlyAI_API_KEY"""'], {}), "('api_key', api_key, 'EverlyAI_API_KEY')\n", (1546, 1586), False, 'from llama_index.legacy.llms.generic_utils import get_from_param_or_env\n'), ((1486, 1... |
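`get_from_param_or_env` resolves a value from the explicit parameter first, then the named environment variable, and raises if neither is set. A sketch of the call recorded above, with a placeholder key:

```python
import os
from llama_index.legacy.llms.generic_utils import get_from_param_or_env

os.environ["EverlyAI_API_KEY"] = "sk-demo"  # placeholder so the lookup succeeds
api_key = get_from_param_or_env("api_key", None, "EverlyAI_API_KEY")
```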
"""txtai reader."""
from typing import Any, Dict, List
import numpy as np
from llama_index.legacy.readers.base import BaseReader
from llama_index.legacy.schema import Document
class TxtaiReader(BaseReader):
"""txtai reader.
Retrieves documents through an existing in-memory txtai index.
These documents... | [
"llama_index.legacy.schema.Document"
] | [((2425, 2444), 'llama_index.legacy.schema.Document', 'Document', ([], {'text': 'text'}), '(text=text)\n', (2433, 2444), False, 'from llama_index.legacy.schema import Document\n'), ((2194, 2213), 'llama_index.legacy.schema.Document', 'Document', ([], {'text': 'text'}), '(text=text)\n', (2202, 2213), False, 'from llama_... |
"""txtai reader."""
from typing import Any, Dict, List
import numpy as np
from llama_index.legacy.readers.base import BaseReader
from llama_index.legacy.schema import Document
class TxtaiReader(BaseReader):
"""txtai reader.
Retrieves documents through an existing in-memory txtai index.
These documents... | [
"llama_index.legacy.schema.Document"
] | [((2425, 2444), 'llama_index.legacy.schema.Document', 'Document', ([], {'text': 'text'}), '(text=text)\n', (2433, 2444), False, 'from llama_index.legacy.schema import Document\n'), ((2194, 2213), 'llama_index.legacy.schema.Document', 'Document', ([], {'text': 'text'}), '(text=text)\n', (2202, 2213), False, 'from llama_... |
from llama_index.core.prompts.base import PromptTemplate
from llama_index.core.prompts.prompt_type import PromptType
"""Single select prompt.
PromptTemplate to select one out of `num_choices` options provided in `context_list`,
given a query `query_str`.
Required template variables: `num_chunks`, `context_list`, `qu... | [
"llama_index.core.prompts.base.PromptTemplate"
] | [((1156, 1257), 'llama_index.core.prompts.base.PromptTemplate', 'PromptTemplate', ([], {'template': 'DEFAULT_SINGLE_SELECT_PROMPT_TMPL', 'prompt_type': 'PromptType.SINGLE_SELECT'}), '(template=DEFAULT_SINGLE_SELECT_PROMPT_TMPL, prompt_type=\n PromptType.SINGLE_SELECT)\n', (1170, 1257), False, 'from llama_index.core.... |
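The default prompt is registered with `prompt_type=PromptType.SINGLE_SELECT`; a custom replacement must expose the same template variables (`num_chunks`, `context_list`, `query_str`). A sketch:

```python
from llama_index.core.prompts.base import PromptTemplate
from llama_index.core.prompts.prompt_type import PromptType

my_single_select = PromptTemplate(
    template=(
        "Some choices are given below ({num_chunks} of them):\n"
        "{context_list}\n"
        "Pick the one most relevant to this question: {query_str}\n"
    ),
    prompt_type=PromptType.SINGLE_SELECT,
)
```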
"""Awadb reader."""
from typing import Any, List
import numpy as np
from llama_index.legacy.readers.base import BaseReader
from llama_index.legacy.schema import Document
class AwadbReader(BaseReader):
"""Awadb reader.
Retrieves documents through an existing awadb client.
These documents ... | [
"llama_index.legacy.schema.Document"
] | [((1780, 1824), 'llama_index.legacy.schema.Document', 'Document', ([], {'text': "item_detail['embedding_text']"}), "(text=item_detail['embedding_text'])\n", (1788, 1824), False, 'from llama_index.legacy.schema import Document\n'), ((2042, 2061), 'llama_index.legacy.schema.Document', 'Document', ([], {'text': 'text'}), ... |
"""Mongo client."""
from typing import Dict, Iterable, List, Optional, Union
from llama_index.legacy.readers.base import BaseReader
from llama_index.legacy.schema import Document
class SimpleMongoReader(BaseReader):
"""Simple mongo reader.
Concatenates each Mongo doc into a Document used by LlamaIndex.
... | [
"llama_index.legacy.schema.Document"
] | [((887, 903), 'pymongo.MongoClient', 'MongoClient', (['uri'], {}), '(uri)\n', (898, 903), False, 'from pymongo import MongoClient\n'), ((953, 976), 'pymongo.MongoClient', 'MongoClient', (['host', 'port'], {}), '(host, port)\n', (964, 976), False, 'from pymongo import MongoClient\n'), ((3133, 3152), 'llama_index.legacy.... |
"""Mongo client."""
from typing import Dict, Iterable, List, Optional, Union
from llama_index.legacy.readers.base import BaseReader
from llama_index.legacy.schema import Document
class SimpleMongoReader(BaseReader):
"""Simple mongo reader.
Concatenates each Mongo doc into Document used by LlamaIndex.
... | [
"llama_index.legacy.schema.Document"
] | [((887, 903), 'pymongo.MongoClient', 'MongoClient', (['uri'], {}), '(uri)\n', (898, 903), False, 'from pymongo import MongoClient\n'), ((953, 976), 'pymongo.MongoClient', 'MongoClient', (['host', 'port'], {}), '(host, port)\n', (964, 976), False, 'from pymongo import MongoClient\n'), ((3133, 3152), 'llama_index.legacy.... |
from typing import Any, Callable, Optional, Sequence
from typing_extensions import override
from llama_index.legacy.bridge.pydantic import Field, PrivateAttr
from llama_index.legacy.callbacks import CallbackManager
from llama_index.legacy.constants import DEFAULT_NUM_OUTPUTS
from llama_index.legacy.core.llms.types im... | [
"llama_index.legacy.llms.base.llm_completion_callback",
"llama_index.legacy.bridge.pydantic.PrivateAttr",
"llama_index.legacy.core.llms.types.LLMMetadata",
"llama_index.legacy.bridge.pydantic.Field",
"llama_index.legacy.core.llms.types.CompletionResponse"
] | [((660, 673), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (671, 673), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((687, 700), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (698, 700), False, 'from llama_index... |
from typing import Dict, Type
from llama_index.core.base.embeddings.base import BaseEmbedding
from llama_index.core.embeddings.mock_embed_model import MockEmbedding
RECOGNIZED_EMBEDDINGS: Dict[str, Type[BaseEmbedding]] = {
MockEmbedding.class_name(): MockEmbedding,
}
# conditionals for llama-cloud support
try:
... | [
"llama_index.embeddings.huggingface.HuggingFaceInferenceAPIEmbedding.class_name",
"llama_index.embeddings.azure_openai.AzureOpenAIEmbedding.class_name",
"llama_index.core.embeddings.mock_embed_model.MockEmbedding.class_name",
"llama_index.embeddings.openai.OpenAIEmbedding.class_name"
] | [((229, 255), 'llama_index.core.embeddings.mock_embed_model.MockEmbedding.class_name', 'MockEmbedding.class_name', ([], {}), '()\n', (253, 255), False, 'from llama_index.core.embeddings.mock_embed_model import MockEmbedding\n'), ((431, 459), 'llama_index.embeddings.openai.OpenAIEmbedding.class_name', 'OpenAIEmbedding.c... |
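Keying the registry by `class_name()` lets serialized embedding configs be mapped back to constructors. A hypothetical loader (names are illustrative, not the library's own):

```python
from typing import Any, Dict
from llama_index.core.embeddings.mock_embed_model import MockEmbedding

RECOGNIZED_EMBEDDINGS: Dict[str, type] = {MockEmbedding.class_name(): MockEmbedding}

def load_embed_model(data: Dict[str, Any]):  # hypothetical helper
    name = data.pop("class_name")
    if name not in RECOGNIZED_EMBEDDINGS:
        raise ValueError(f"Unrecognized embedding class: {name}")
    return RECOGNIZED_EMBEDDINGS[name](**data)

model = load_embed_model({"class_name": MockEmbedding.class_name(), "embed_dim": 8})
```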
import asyncio
from llama_index.core.llama_dataset import download_llama_dataset
from llama_index.core.llama_pack import download_llama_pack
from llama_index.core.evaluation import CorrectnessEvaluator
from llama_index.llms import OpenAI, Gemini
from llama_index.core import ServiceContext
import pandas as pd
async d... | [
"llama_index.core.llama_pack.download_llama_pack",
"llama_index.core.evaluation.CorrectnessEvaluator",
"llama_index.llms.Gemini",
"llama_index.llms.OpenAI",
"llama_index.core.llama_dataset.download_llama_dataset"
] | [((386, 471), 'llama_index.core.llama_dataset.download_llama_dataset', 'download_llama_dataset', (['"""MiniMtBenchSingleGradingDataset"""', '"""./mini_mt_bench_data"""'], {}), "('MiniMtBenchSingleGradingDataset',\n './mini_mt_bench_data')\n", (408, 471), False, 'from llama_index.core.llama_dataset import download_ll... |
import asyncio
from llama_index.core.llama_dataset import download_llama_dataset
from llama_index.core.llama_pack import download_llama_pack
from llama_index.core import VectorStoreIndex
async def main():
# DOWNLOAD LLAMADATASET
rag_dataset, documents = download_llama_dataset(
"PaulGrahamEssayDataset... | [
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.core.llama_dataset.download_llama_dataset",
"llama_index.core.llama_pack.download_llama_pack"
] | [((265, 330), 'llama_index.core.llama_dataset.download_llama_dataset', 'download_llama_dataset', (['"""PaulGrahamEssayDataset"""', '"""./paul_graham"""'], {}), "('PaulGrahamEssayDataset', './paul_graham')\n", (287, 330), False, 'from llama_index.core.llama_dataset import download_llama_dataset\n'), ((389, 441), 'llama_... |
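A condensed sketch of the script above: fetch the dataset, index its source documents, and expose a query engine (the default embedding model and LLM assume OpenAI credentials):

```python
from llama_index.core import VectorStoreIndex
from llama_index.core.llama_dataset import download_llama_dataset

rag_dataset, documents = download_llama_dataset("PaulGrahamEssayDataset", "./paul_graham")
index = VectorStoreIndex.from_documents(documents=documents)
query_engine = index.as_query_engine()
```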
from typing import TYPE_CHECKING, Any, Optional
from llama_index.core.base.base_query_engine import BaseQueryEngine
if TYPE_CHECKING:
from llama_index.core.langchain_helpers.agents.tools import (
LlamaIndexTool,
)
from llama_index.core.tools.types import AsyncBaseTool, ToolMetadata, ToolOutput
DEFAUL... | [
"llama_index.core.langchain_helpers.agents.tools.IndexToolConfig",
"llama_index.core.langchain_helpers.agents.tools.LlamaIndexTool.from_tool_config",
"llama_index.core.tools.types.ToolMetadata"
] | [((1402, 1450), 'llama_index.core.tools.types.ToolMetadata', 'ToolMetadata', ([], {'name': 'name', 'description': 'description'}), '(name=name, description=description)\n', (1414, 1450), False, 'from llama_index.core.tools.types import AsyncBaseTool, ToolMetadata, ToolOutput\n'), ((3560, 3675), 'llama_index.core.langch... |
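The extract records a bare `ToolMetadata(name=..., description=...)` call; the same metadata shape backs concrete tools. A runnable sketch using `FunctionTool` (the `echo` tool is illustrative):

```python
from llama_index.core.tools import FunctionTool
from llama_index.core.tools.types import ToolMetadata

metadata = ToolMetadata(name="echo", description="Echoes its input back.")

def echo(text: str) -> str:
    return text

tool = FunctionTool.from_defaults(fn=echo, name="echo", description="Echoes its input back.")
print(tool.metadata.name)
```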
from typing import Any, Callable, Dict, Optional, Sequence
from llama_index.legacy.bridge.pydantic import Field, PrivateAttr
from llama_index.legacy.callbacks import CallbackManager
from llama_index.legacy.constants import DEFAULT_NUM_OUTPUTS
from llama_index.legacy.core.llms.types import (
ChatMessage,
ChatRe... | [
"llama_index.legacy.llms.openai_utils.from_openai_message_dict",
"llama_index.legacy.llms.base.llm_chat_callback",
"llama_index.legacy.core.llms.types.ChatResponse",
"llama_index.legacy.llms.base.llm_completion_callback",
"llama_index.legacy.bridge.pydantic.PrivateAttr",
"llama_index.legacy.core.llms.type... | [((868, 916), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The llama-api model to use."""'}), "(description='The llama-api model to use.')\n", (873, 916), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((942, 999), 'llama_index.legacy.bridge.pydantic.Fiel... |
"""Download tool from Llama Hub."""
from typing import Optional, Type
from llama_index.legacy.download.module import (
LLAMA_HUB_URL,
MODULE_TYPE,
download_llama_module,
track_download,
)
from llama_index.legacy.tools.tool_spec.base import BaseToolSpec
def download_tool(
tool_class: str,
lla... | [
"llama_index.legacy.download.module.track_download",
"llama_index.legacy.download.module.download_llama_module"
] | [((867, 1047), 'llama_index.legacy.download.module.download_llama_module', 'download_llama_module', (['tool_class'], {'llama_hub_url': 'llama_hub_url', 'refresh_cache': 'refresh_cache', 'custom_dir': '"""tools"""', 'custom_path': 'custom_path', 'library_path': '"""tools/library.json"""'}), "(tool_class, llama_hub_url=l... |
"""Download tool from Llama Hub."""
from typing import Optional, Type
from llama_index.legacy.download.module import (
LLAMA_HUB_URL,
MODULE_TYPE,
download_llama_module,
track_download,
)
from llama_index.legacy.tools.tool_spec.base import BaseToolSpec
def download_tool(
tool_class: str,
lla... | [
"llama_index.legacy.download.module.track_download",
"llama_index.legacy.download.module.download_llama_module"
] | [((867, 1047), 'llama_index.legacy.download.module.download_llama_module', 'download_llama_module', (['tool_class'], {'llama_hub_url': 'llama_hub_url', 'refresh_cache': 'refresh_cache', 'custom_dir': '"""tools"""', 'custom_path': 'custom_path', 'library_path': '"""tools/library.json"""'}), "(tool_class, llama_hub_url=l... |
"""Simple Engine."""
import json
import os
from typing import Any, Optional, Union
from llama_index.core import SimpleDirectoryReader, VectorStoreIndex
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.embeddings import BaseEmbedding
from llama_index.core.embeddings.mock_embed_model im... | [
"llama_index.core.node_parser.SentenceSplitter",
"llama_index.core.response_synthesizers.get_response_synthesizer",
"llama_index.core.ingestion.pipeline.run_transformations",
"llama_index.core.embeddings.mock_embed_model.MockEmbedding",
"llama_index.core.schema.QueryBundle",
"llama_index.core.SimpleDirect... | [((7145, 7220), 'llama_index.core.ingestion.pipeline.run_transformations', 'run_transformations', (['documents'], {'transformations': 'self.index._transformations'}), '(documents, transformations=self.index._transformations)\n', (7164, 7220), False, 'from llama_index.core.ingestion.pipeline import run_transformations\n... |
from collections import ChainMap
from typing import (
Any,
Callable,
Dict,
List,
Optional,
Protocol,
Sequence,
get_args,
runtime_checkable,
)
from llama_index.core.base.llms.types import (
ChatMessage,
ChatResponseAsyncGen,
ChatResponseGen,
CompletionResponseAsyncGen... | [
"llama_index.core.bridge.pydantic.validator",
"llama_index.core.base.query_pipeline.query.InputKeys.from_keys",
"llama_index.core.base.query_pipeline.query.OutputKeys.from_keys",
"llama_index.core.instrumentation.get_dispatcher",
"llama_index.core.bridge.pydantic.Field",
"llama_index.core.instrumentation.... | [((1325, 1360), 'llama_index.core.instrumentation.get_dispatcher', 'instrument.get_dispatcher', (['__name__'], {}), '(__name__)\n', (1350, 1360), True, 'import llama_index.core.instrumentation as instrument\n'), ((3081, 3144), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"... |
import os
from typing import Optional, Dict
import openai
import pandas as pd
import llama_index
from llama_index.llms.openai import OpenAI
from llama_index.readers.schema.base import Document
from llama_index.readers import SimpleWebPageReader
from llama_index.prompts import PromptTemplate
from llama_index import Se... | [
"llama_index.readers.SimpleWebPageReader",
"llama_index.llms.openai.OpenAI",
"llama_index.ServiceContext.from_defaults",
"llama_index.OpenAIEmbedding",
"llama_index.prompts.PromptTemplate",
"llama_index.StorageContext.from_defaults",
"llama_index.load_index_from_storage",
"llama_index.readers.schema.b... | [((9647, 9699), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'index_path'}), '(persist_dir=index_path)\n', (9675, 9699), False, 'from llama_index import ServiceContext, StorageContext, load_index_from_storage\n'), ((9770, 9843), 'llama_index.load_index_from_storage', ... |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without res... | [
"llama_index.llms.base.llm_chat_callback",
"llama_index.llms.base.LLMMetadata",
"llama_index.bridge.pydantic.Field",
"llama_index.llms.generic_utils.completion_response_to_chat_response",
"llama_index.bridge.pydantic.PrivateAttr",
"llama_index.llms.base.llm_completion_callback"
] | [((2151, 2199), 'llama_index.bridge.pydantic.Field', 'Field', ([], {'description': '"""The path to the trt engine."""'}), "(description='The path to the trt engine.')\n", (2156, 2199), False, 'from llama_index.bridge.pydantic import Field, PrivateAttr\n'), ((2239, 2296), 'llama_index.bridge.pydantic.Field', 'Field', ([... |
from llama_index.core.callbacks.schema import CBEventType, EventPayload
from llama_index.core.llms import ChatMessage, ChatResponse
from llama_index.core.schema import NodeWithScore, TextNode
import chainlit as cl
@cl.on_chat_start
async def start():
await cl.Message(content="LlamaIndexCb").send()
cb = cl.L... | [
"llama_index.core.schema.TextNode",
"llama_index.core.llms.ChatMessage"
] | [((316, 346), 'chainlit.LlamaIndexCallbackHandler', 'cl.LlamaIndexCallbackHandler', ([], {}), '()\n', (344, 346), True, 'import chainlit as cl\n'), ((415, 428), 'chainlit.sleep', 'cl.sleep', (['(0.2)'], {}), '(0.2)\n', (423, 428), True, 'import chainlit as cl\n'), ((691, 704), 'chainlit.sleep', 'cl.sleep', (['(0.2)'], ... |
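Retrieval results pair a `TextNode` with a relevance score, which is what the `NodeWithScore` import above is for. A sketch:

```python
from llama_index.core.schema import NodeWithScore, TextNode

node = TextNode(text="LlamaIndexCb demo content.")
scored = NodeWithScore(node=node, score=0.87)  # score value is illustrative
print(scored.score, scored.node.get_content())
```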
import requests
from bs4 import BeautifulSoup
from llama_index import GPTSimpleVectorIndex
from llama_index.readers.database import DatabaseReader
from env import settings
from logger import logger
from .base import BaseToolSet, SessionGetter, ToolScope, tool
class RequestsGet(BaseToolSet):
@tool(
name=... | [
"llama_index.GPTSimpleVectorIndex",
"llama_index.readers.database.DatabaseReader"
] | [((713, 732), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html'], {}), '(html)\n', (726, 732), False, 'from bs4 import BeautifulSoup\n'), ((1073, 1166), 'logger.logger.debug', 'logger.debug', (['f"""\nProcessed RequestsGet, Input Url: {url} Output Contents: {content}"""'], {}), '(\n f"""\nProcessed RequestsGet, Input U... |
try:
from llama_index import Document
from llama_index.text_splitter import SentenceSplitter
except ImportError:
from llama_index.core import Document
from llama_index.core.text_splitter import SentenceSplitter
def llama_index_sentence_splitter(
documents: list[str], document_ids: list[str], chunk... | [
"llama_index.core.text_splitter.SentenceSplitter",
"llama_index.core.Document"
] | [((432, 500), 'llama_index.core.text_splitter.SentenceSplitter', 'SentenceSplitter', ([], {'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap'}), '(chunk_size=chunk_size, chunk_overlap=chunk_overlap)\n', (448, 500), False, 'from llama_index.core.text_splitter import SentenceSplitter\n'), ((514, 532), 'llama_in... |
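A sketch of what the splitter function above does, under the `llama_index.core` import path: chunk documents with a `SentenceSplitter` and collect the chunk texts:

```python
from llama_index.core import Document
from llama_index.core.text_splitter import SentenceSplitter

splitter = SentenceSplitter(chunk_size=64, chunk_overlap=8)
documents = [Document(text="A short document."), Document(text="Another one.")]
nodes = splitter.get_nodes_from_documents(documents)
chunks = [node.get_content() for node in nodes]
```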
"""
Creates a RAG dataset for tutorial notebooks and persists it to disk.
"""
import argparse
import logging
import sys
from typing import List, Optional
import llama_index
import numpy as np
import pandas as pd
from gcsfs import GCSFileSystem
from llama_index import ServiceContext, StorageContext, load_index_from_storage... | [
"llama_index.llms.OpenAI",
"llama_index.StorageContext.from_defaults",
"llama_index.callbacks.OpenInferenceCallbackHandler",
"llama_index.load_index_from_storage",
"llama_index.callbacks.CallbackManager",
"llama_index.embeddings.openai.OpenAIEmbedding"
] | [((1235, 1270), 'numpy.array', 'np.array', (['first_document_relevances'], {}), '(first_document_relevances)\n', (1243, 1270), True, 'import numpy as np\n'), ((1310, 1346), 'numpy.array', 'np.array', (['second_document_relevances'], {}), '(second_document_relevances)\n', (1318, 1346), True, 'import numpy as np\n'), ((1... |
import logging
import os
import time
import typing
import uuid
from typing import TYPE_CHECKING, Any, Iterable, List, Optional
import numpy as np
from llama_index.core.schema import BaseNode, MetadataMode, TextNode
from llama_index.core.vector_stores.types import (
VectorStore,
VectorStoreQuery,
VectorSto... | [
"llama_index.core.vector_stores.utils.metadata_dict_to_node",
"llama_index.core.vector_stores.types.VectorStoreQueryResult",
"llama_index.core.vector_stores.utils.legacy_metadata_dict_to_node",
"llama_index.core.vector_stores.utils.node_to_metadata_dict"
] | [((524, 551), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (541, 551), False, 'import logging\n'), ((8582, 8767), 'vearch.GammaVectorInfo', 'vearch.GammaVectorInfo', ([], {'name': '"""text_embedding"""', 'type': 'vearch.dataType.VECTOR', 'is_index': '(True)', 'dimension': 'dim', 'model_... |
# ENTER YOUR OPENAI KEY IN OPENAI_API_KEY ENV VAR FIRST
from llama_index import GPTSimpleVectorIndex, SimpleDirectoryReader, LLMPredictor, download_loader
savePath = f'/{os.path.dirname(__file__)}/indexes/index.json'
#
# index = GPTSimpleVectorIndex(documents)#, llm_predictor=llm_predictor)
index = GPTSimpleVectorIn... | [
"llama_index.GPTSimpleVectorIndex.load_from_disk"
] | [((303, 348), 'llama_index.GPTSimpleVectorIndex.load_from_disk', 'GPTSimpleVectorIndex.load_from_disk', (['savePath'], {}), '(savePath)\n', (338, 348), False, 'from llama_index import GPTSimpleVectorIndex, SimpleDirectoryReader, LLMPredictor, download_loader\n')] |
from typing import Optional, Union
from llama_index import ServiceContext
from llama_index.callbacks import CallbackManager
from llama_index.embeddings.utils import EmbedType
from llama_index.extractors import (
EntityExtractor,
KeywordExtractor,
QuestionsAnsweredExtractor,
SummaryExtractor,
TitleE... | [
"llama_index.extractors.TitleExtractor",
"llama_index.ServiceContext.from_defaults",
"llama_index.prompts.PromptTemplate",
"llama_index.extractors.KeywordExtractor",
"llama_index.extractors.QuestionsAnsweredExtractor",
"llama_index.callbacks.CallbackManager",
"llama_index.text_splitter.SentenceSplitter"... | [((3952, 4020), 'llama_index.text_splitter.SentenceSplitter', 'SentenceSplitter', ([], {'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap'}), '(chunk_size=chunk_size, chunk_overlap=chunk_overlap)\n', (3968, 4020), False, 'from llama_index.text_splitter import SentenceSplitter\n'), ((4643, 4954), 'llama_index.... |
import torch
from llama_index import WikipediaReader
def divide_string(wiki_page, word_limit=50):
divided_text = []
for each_page in wiki_page:
words = each_page[0].text.split()
for i in range(0, len(words), word_limit):
chunk = ' '.join(words[i:i+word_limit])
... | [
"llama_index.WikipediaReader"
] | [((933, 948), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (946, 948), False, 'import torch\n'), ((3638, 3653), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3651, 3653), False, 'import torch\n'), ((1958, 1975), 'llama_index.WikipediaReader', 'WikipediaReader', ([], {}), '()\n', (1973, 1975), False, 'from... |
from rag.agents.interface import Pipeline
from llama_index.core.program import LLMTextCompletionProgram
import json
from llama_index.llms.ollama import Ollama
from typing import List
from pydantic import create_model
from rich.progress import Progress, SpinnerColumn, TextColumn
import requests
import warnings
import bo... | [
"llama_index.core.program.LLMTextCompletionProgram.from_defaults",
"llama_index.llms.ollama.Ollama"
] | [((396, 458), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'DeprecationWarning'}), "('ignore', category=DeprecationWarning)\n", (419, 458), False, 'import warnings\n'), ((459, 514), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'UserWarnin... |
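Structured extraction with a local model, matching the imports above; the model name, schema, and prompt are placeholders, and a running Ollama server is assumed:

```python
from pydantic import BaseModel
from llama_index.core.program import LLMTextCompletionProgram
from llama_index.llms.ollama import Ollama

class Invoice(BaseModel):  # hypothetical output schema
    vendor: str
    total: float

program = LLMTextCompletionProgram.from_defaults(
    output_cls=Invoice,
    prompt_template_str="Extract the vendor and total from: {text}",
    llm=Ollama(model="llama3"),
)
result = program(text="ACME Corp invoice, total $42.50")
```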
import asyncio
import chromadb
import os
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, StorageContext
from llama_index.vector_stores.chroma import ChromaVectorStore
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from traceloop.sdk import Traceloop
os.environ["TOKENIZERS_P... | [
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.embeddings.huggingface.HuggingFaceEmbedding",
"llama_index.core.StorageContext.from_defaults",
"llama_index.core.SimpleDirectoryReader",
"llama_index.vector_stores.chroma.ChromaVectorStore"
] | [((344, 390), 'traceloop.sdk.Traceloop.init', 'Traceloop.init', ([], {'app_name': '"""llama_index_example"""'}), "(app_name='llama_index_example')\n", (358, 390), False, 'from traceloop.sdk import Traceloop\n'), ((408, 434), 'chromadb.EphemeralClient', 'chromadb.EphemeralClient', ([], {}), '()\n', (432, 434), False, 'i... |
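The extracted calls wire an ephemeral Chroma collection behind a `VectorStoreIndex`. A sketch with placeholder collection and data paths:

```python
import chromadb
from llama_index.core import SimpleDirectoryReader, StorageContext, VectorStoreIndex
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.vector_stores.chroma import ChromaVectorStore

client = chromadb.EphemeralClient()
collection = client.create_collection("quickstart")
vector_store = ChromaVectorStore(chroma_collection=collection)
storage_context = StorageContext.from_defaults(vector_store=vector_store)

documents = SimpleDirectoryReader("./data").load_data()
index = VectorStoreIndex.from_documents(
    documents,
    storage_context=storage_context,
    embed_model=HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5"),
)
```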
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ================================================== #
# This file is a part of PYGPT package #
# Website: https://pygpt.net #
# GitHub: https://github.com/szczyglis-dev/py-gpt #
# MIT License ... | [
"llama_index.core.StorageContext.from_defaults",
"llama_index.core.load_index_from_storage"
] | [((3275, 3321), 'llama_index.core.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'path'}), '(persist_dir=path)\n', (3303, 3321), False, 'from llama_index.core import StorageContext, load_index_from_storage\n'), ((3384, 3457), 'llama_index.core.load_index_from_storage', 'load_index_f... |
import streamlit as st
from sqlalchemy import create_engine, inspect, text
from typing import Dict, Any
from llama_index import (
VectorStoreIndex,
ServiceContext,
download_loader,
)
from llama_index.llama_pack.base import BaseLlamaPack
from llama_index.llms import OpenAI
import openai
import os
import pan... | [
"llama_index.ServiceContext.from_defaults",
"llama_index.llms.OpenAI",
"llama_index.SQLDatabase",
"llama_index.indices.struct_store.NLSQLTableQueryEngine"
] | [((1194, 1309), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': 'f"""{self.page}"""', 'layout': '"""centered"""', 'initial_sidebar_state': '"""auto"""', 'menu_items': 'None'}), "(page_title=f'{self.page}', layout='centered',\n initial_sidebar_state='auto', menu_items=None)\n", (1212, 1309), Tr... |
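Text-to-SQL over the APIs listed above (legacy import paths). A sketch with an in-memory database; the default LLM assumes OpenAI credentials:

```python
from sqlalchemy import create_engine
from llama_index import SQLDatabase
from llama_index.indices.struct_store import NLSQLTableQueryEngine

engine = create_engine("sqlite:///:memory:")  # placeholder database
sql_database = SQLDatabase(engine)
query_engine = NLSQLTableQueryEngine(sql_database=sql_database)
```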
#!/usr/bin/env python3
from flask import Flask, request
from werkzeug.utils import secure_filename
from llama_index import GPTSimpleVectorIndex, download_loader
import json
import secrets
app = Flask(__name__)
@app.route('/index', methods = ['GET', 'POST'])
def upload_and_index():
if request.method == "POST"... | [
"llama_index.GPTSimpleVectorIndex.load_from_disk",
"llama_index.GPTSimpleVectorIndex",
"llama_index.download_loader"
] | [((199, 214), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (204, 214), False, 'from flask import Flask, request\n'), ((893, 947), 'llama_index.GPTSimpleVectorIndex.load_from_disk', 'GPTSimpleVectorIndex.load_from_disk', (['f"""{data_id}.json"""'], {}), "(f'{data_id}.json')\n", (928, 947), False, 'from ll... |
from contextlib import contextmanager
import uuid
import os
import tiktoken
from . import S2_tools as scholar
import csv
import sys
import requests
# pdf loader
from langchain.document_loaders import OnlinePDFLoader
## paper questioning tools
from llama_index import Document
from llama_index.vector_stores import Pi... | [
"llama_index.vector_stores.PineconeVectorStore",
"llama_index.ServiceContext.from_defaults",
"llama_index.StorageContext.from_defaults",
"llama_index.GPTVectorStoreIndex.from_documents",
"llama_index.embeddings.openai.OpenAIEmbedding"
] | [((768, 796), 'os.mkdir', 'os.mkdir', (['workspace_dir_name'], {}), '(workspace_dir_name)\n', (776, 796), False, 'import os\n'), ((5950, 5986), 'tiktoken.encoding_for_model', 'tiktoken.encoding_for_model', (['"""gpt-4"""'], {}), "('gpt-4')\n", (5977, 5986), False, 'import tiktoken\n'), ((7532, 7548), 'os.listdir', 'os.... |
import os
import logging
import sys
from llama_index import GPTSimpleVectorIndex
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
# Load the index
new_index = GPTSimpleVectorIndex.load_from_disk('index.json')
# Query the index
response = new_index.query("W... | [
"llama_index.GPTSimpleVectorIndex.load_from_disk"
] | [((82, 140), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (101, 140), False, 'import logging\n'), ((234, 283), 'llama_index.GPTSimpleVectorIndex.load_from_disk', 'GPTSimpleVectorIndex.load_from_disk', (['"""index.json... |
import os
import openai
from fastapi import FastAPI, HTTPException
from llama_index import StorageContext, load_index_from_storage, ServiceContext, set_global_service_context
from llama_index.indices.postprocessor import SentenceEmbeddingOptimizer
from llama_index.embeddings import OpenAIEmbedding
from pydantic import... | [
"llama_index.indices.postprocessor.SentenceEmbeddingOptimizer",
"llama_index.ServiceContext.from_defaults",
"llama_index.StorageContext.from_defaults",
"llama_index.embeddings.OpenAIEmbedding",
"llama_index.set_global_service_context",
"llama_index.load_index_from_storage"
] | [((385, 394), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (392, 394), False, 'from fastapi import FastAPI, HTTPException\n'), ((510, 546), 'llama_index.embeddings.OpenAIEmbedding', 'OpenAIEmbedding', ([], {'embed_batch_size': '(10)'}), '(embed_batch_size=10)\n', (525, 546), False, 'from llama_index.embeddings impor... |
"""Example of how to use llamaindex for semantic search.
This example assumes that initially there is a projects.DATASETS_DIR_PATH/embeddings.pkl file
that has a list of dictionaries with each dictionary containing "text",
"rule_name" and "section_label" fields.
The first time you run this script, a vector store will... | [
"llama_index.get_response_synthesizer",
"llama_index.VectorStoreIndex",
"llama_index.ServiceContext.from_defaults",
"llama_index.retrievers.VectorIndexRetriever",
"llama_index.schema.TextNode",
"llama_index.StorageContext.from_defaults",
"llama_index.indices.postprocessor.SimilarityPostprocessor",
"ll... | [((1802, 1832), 'pathlib.Path', 'Path', (['"""cache/msrb_index_store"""'], {}), "('cache/msrb_index_store')\n", (1806, 1832), False, 'from pathlib import Path\n'), ((1726, 1783), 'os.path.join', 'os.path.join', (['project.DATASETS_DIR_PATH', '"""embeddings.pkl"""'], {}), "(project.DATASETS_DIR_PATH, 'embeddings.pkl')\n... |
from dotenv import load_dotenv
load_dotenv()
from llama_index import GPTVectorStoreIndex, TrafilaturaWebReader
import chromadb
def create_embedding_store(name):
chroma_client = chromadb.Client()
return chroma_client.create_collection(name)
def query_pages(collection, urls, questions):
docs = Trafilatur... | [
"llama_index.GPTVectorStoreIndex.from_documents",
"llama_index.TrafilaturaWebReader"
] | [((32, 45), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (43, 45), False, 'from dotenv import load_dotenv\n'), ((185, 202), 'chromadb.Client', 'chromadb.Client', ([], {}), '()\n', (200, 202), False, 'import chromadb\n'), ((361, 431), 'llama_index.GPTVectorStoreIndex.from_documents', 'GPTVectorStoreIndex.from_... |
import logging
from llama_index.readers.base import BaseReader
from llama_index.readers.schema.base import Document
import requests
from typing import List
import re
import os
import logging
from llama_index.readers.base import BaseReader
from llama_index.readers.schema.base import Document
import requests
from typing ... | [
"llama_index.readers.schema.base.Document"
] | [((1299, 1406), 'openai.ChatCompletion.create', 'openai.ChatCompletion.create', ([], {'model': '"""gpt-3.5-turbo"""', 'messages': 'messages', 'temperature': '(0.5)', 'max_tokens': '(256)'}), "(model='gpt-3.5-turbo', messages=messages,\n temperature=0.5, max_tokens=256)\n", (1327, 1406), False, 'import openai\n'), ((... |
from llama_index.embeddings import LinearAdapterEmbeddingModel, resolve_embed_model
from llama_index.finetuning import EmbeddingQAFinetuneDataset
import pickle
from eval_utils import evaluate, display_results
def run_eval(val_data: str) -> None:
val_dataset = EmbeddingQAFinetuneDataset.from_json(val_data)
print("L... | [
"llama_index.embeddings.LinearAdapterEmbeddingModel",
"llama_index.finetuning.EmbeddingQAFinetuneDataset.from_json",
"llama_index.embeddings.resolve_embed_model"
] | [((264, 310), 'llama_index.finetuning.EmbeddingQAFinetuneDataset.from_json', 'EmbeddingQAFinetuneDataset.from_json', (['val_data'], {}), '(val_data)\n', (300, 310), False, 'from llama_index.finetuning import EmbeddingQAFinetuneDataset\n'), ((401, 438), 'llama_index.embeddings.resolve_embed_model', 'resolve_embed_model'... |
"""Simple horoscope predictions generator."""
from typing import List, Optional, Dict, Callable
import re
import json
from llama_index.core.bridge.pydantic import PrivateAttr
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
from vedastro import *
class SimpleB... | [
"llama_index.core.Document",
"llama_index.core.schema.NodeWithScore",
"llama_index.core.bridge.pydantic.PrivateAttr"
] | [((767, 780), 'llama_index.core.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (778, 780), False, 'from llama_index.core.bridge.pydantic import PrivateAttr\n'), ((8054, 8115), 're.sub', 're.sub', (['"""((?<=[a-z])[A-Z]|(?<!\\\\A)[A-Z](?=[a-z]))"""', '""" \\\\1"""', 's'], {}), "('((?<=[a-z])[A-Z]|(?<!\\\... |
import os
from llama_index import GPTSimpleVectorIndex, SimpleDirectoryReader
from flask import Flask, render_template, jsonify, request
index = None
# set up the index: either load it from disk or create it on the fly
def initialise_index():
global index
if os.path.exists(os.environ["INDEX_FILE"]):
in... | [
"llama_index.SimpleDirectoryReader",
"llama_index.GPTSimpleVectorIndex.load_from_disk",
"llama_index.GPTSimpleVectorIndex.from_documents"
] | [((756, 819), 'flask.Flask', 'Flask', (['__name__'], {'static_folder': 'gui_dir', 'template_folder': 'gui_dir'}), '(__name__, static_folder=gui_dir, template_folder=gui_dir)\n', (761, 819), False, 'from flask import Flask, render_template, jsonify, request\n'), ((268, 308), 'os.path.exists', 'os.path.exists', (["os.env... |
from llama_index.callbacks import CallbackManager, LlamaDebugHandler, CBEventType
from llama_index import ListIndex, ServiceContext, SimpleDirectoryReader, VectorStoreIndex
'''
Title of the page: A simple Python implementation of the ReAct pattern for LLMs
Name of the website: LlamaIndex (GPT Index) is a data framewor... | [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.SimpleDirectoryReader",
"llama_index.ServiceContext.from_defaults",
"llama_index.callbacks.LlamaDebugHandler",
"llama_index.callbacks.CallbackManager"
] | [((676, 718), 'llama_index.callbacks.LlamaDebugHandler', 'LlamaDebugHandler', ([], {'print_trace_on_end': '(True)'}), '(print_trace_on_end=True)\n', (693, 718), False, 'from llama_index.callbacks import CallbackManager, LlamaDebugHandler, CBEventType\n'), ((738, 768), 'llama_index.callbacks.CallbackManager', 'CallbackM... |
import logging
import os
from llama_index import (
StorageContext,
load_index_from_storage,
)
from app.engine.constants import STORAGE_DIR
from app.engine.context import create_service_context
def get_chat_engine():
service_context = create_service_context()
# check if storage already exists
if n... | [
"llama_index.load_index_from_storage",
"llama_index.StorageContext.from_defaults"
] | [((249, 273), 'app.engine.context.create_service_context', 'create_service_context', ([], {}), '()\n', (271, 273), False, 'from app.engine.context import create_service_context\n'), ((507, 535), 'logging.getLogger', 'logging.getLogger', (['"""uvicorn"""'], {}), "('uvicorn')\n", (524, 535), False, 'import logging\n'), (... |
"""Module for loading index."""
import logging
from typing import TYPE_CHECKING, Any, Optional
from llama_index import ServiceContext, StorageContext, load_index_from_storage
from llama_index.indices.base import BaseIndex
from ols.app.models.config import ReferenceContent
# This is to avoid importing HuggingFaceBge... | [
"llama_index.ServiceContext.from_defaults",
"llama_index.load_index_from_storage",
"llama_index.StorageContext.from_defaults"
] | [((661, 688), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (678, 688), False, 'import logging\n'), ((2376, 2445), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'embed_model': 'self._embed_model', 'llm': 'None'}), '(embed_model=self._embed_model, llm=N... |
from llama_index import PromptTemplate
instruction_str = """\
1. Convert the query to executable Python code using Pandas.
2. The final line of code should be a Python expression that can be called with the `eval()` function.
3. The code should represent a solution to the query.
4. PRINT ONLY THE EXPR... | [
"llama_index.PromptTemplate"
] | [((381, 660), 'llama_index.PromptTemplate', 'PromptTemplate', (['""" You are working with a pandas dataframe in Python.\n The name of the dataframe is `df`.\n This is the result of `print(df.head())`:\n {df_str}\n\n Follow these instructions:\n {instruction_str}\n Query: {query_str}\n\n Expressi... |
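Filling the pandas prompt is plain template formatting; `df_str` would normally be `str(df.head())`. A sketch reusing the `instruction_str` defined above, with an abbreviated template:

```python
from llama_index import PromptTemplate

pandas_prompt = PromptTemplate(
    "You are working with a pandas dataframe in Python.\n"
    "This is the result of `print(df.head())`:\n{df_str}\n\n"
    "Follow these instructions:\n{instruction_str}\nQuery: {query_str}\n\nExpression:"
)
prompt = pandas_prompt.format(
    df_str="   a  b\n0  1  2",
    instruction_str=instruction_str,
    query_str="sum of column a",
)
```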
import os, shutil, datetime, time, json
import gradio as gr
import sys
import os
from llama_index import GPTSimpleVectorIndex
bank_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../memory_bank')
sys.path.append(bank_path)
from build_memory_index import build_memory_index
memory_bank_path = os.path.joi... | [
"llama_index.GPTSimpleVectorIndex.load_from_disk"
] | [((213, 239), 'sys.path.append', 'sys.path.append', (['bank_path'], {}), '(bank_path)\n', (228, 239), False, 'import sys\n'), ((384, 417), 'sys.path.append', 'sys.path.append', (['memory_bank_path'], {}), '(memory_bank_path)\n', (399, 417), False, 'import sys\n'), ((882, 945), 'os.path.join', 'os.path.join', (['data_ar... |
from llama_index import (
ServiceContext,
SimpleDirectoryReader,
StorageContext,
VectorStoreIndex,
)
from llama_index.vector_stores.qdrant import QdrantVectorStore
from tqdm import tqdm
import arxiv
import os
import argparse
import yaml
import qdrant_client
from langchain.embeddings.huggingface import H... | [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.SimpleDirectoryReader",
"llama_index.ServiceContext.from_defaults",
"llama_index.StorageContext.from_defaults",
"llama_index.vector_stores.qdrant.QdrantVectorStore",
"llama_index.llms.Ollama"
] | [((2566, 2591), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2589, 2591), False, 'import argparse\n'), ((970, 984), 'arxiv.Client', 'arxiv.Client', ([], {}), '()\n', (982, 984), False, 'import arxiv\n'), ((1003, 1108), 'arxiv.Search', 'arxiv.Search', ([], {'query': 'search_query', 'max_resul... |
from llama_index import SimpleDirectoryReader, VectorStoreIndex, load_index_from_storage
from llama_index.storage.storage_context import StorageContext
from llama_index.indices.service_context import ServiceContext
from llama_index.llms import OpenAI
from llama_index.node_parser import SimpleNodeParser
from llama_index... | [
"llama_index.node_parser.extractors.TitleExtractor",
"llama_index.SimpleDirectoryReader",
"llama_index.storage.storage_context.StorageContext.from_defaults",
"llama_index.node_parser.extractors.SummaryExtractor",
"llama_index.VectorStoreIndex",
"llama_index.indices.service_context.ServiceContext.from_defa... | [((692, 705), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (703, 705), False, 'from dotenv import load_dotenv\n'), ((724, 751), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (733, 751), False, 'import sys, os\n'), ((781, 839), 'logging.basicConfig', 'logging.basicConfig', (... |
# qa_template.py
from llama_index import QuestionAnswerPrompt
# define custom QuestionAnswerPrompt
QA_PROMPT_TMPL = (
"We have provided context information below. \n"
"---------------------\n"
"{context_str}"
"\n---------------------\n"
"Given this context information, please answer the question:... | [
"llama_index.QuestionAnswerPrompt"
] | [((627, 663), 'llama_index.QuestionAnswerPrompt', 'QuestionAnswerPrompt', (['QA_PROMPT_TMPL'], {}), '(QA_PROMPT_TMPL)\n', (647, 663), False, 'from llama_index import QuestionAnswerPrompt\n')] |
from typing import Any, Optional, Sequence, Type, cast
from llama_index.data_structs.data_structs_v2 import (
IndexDict,
OpensearchIndexDict,
)
from llama_index.data_structs.node_v2 import Node
from llama_index.indices.base import BaseGPTIndex, QueryMap
from llama_index.indices.query.schema import QueryMode
f... | [
"llama_index_fix.elasticsearch.ElasticsearchVectorStore"
] | [((1075, 1107), 'llama_index_fix.elasticsearch.ElasticsearchVectorStore', 'ElasticsearchVectorStore', (['client'], {}), '(client)\n', (1099, 1107), False, 'from llama_index_fix.elasticsearch import ElasticsearchVectorStore, ElasticsearchVectorClient\n'), ((1771, 1821), 'typing.cast', 'cast', (['ElasticsearchVectorStore... |
import os
from typing import Any, Callable, Dict, Optional, Sequence
from llama_index.bridge.pydantic import Field, PrivateAttr
from llama_index.core.llms.types import (
ChatMessage,
ChatResponse,
ChatResponseGen,
CompletionResponse,
CompletionResponseGen,
LLMMetadata,
)
from llama_index.llms.... | [
"llama_index.llms.base.llm_chat_callback",
"llama_index.bridge.pydantic.Field",
"llama_index.llms.generic_utils.completion_response_to_chat_response",
"llama_index.core.llms.types.LLMMetadata",
"llama_index.bridge.pydantic.PrivateAttr",
"llama_index.llms.base.llm_completion_callback",
"llama_index.core.... | [((858, 926), 'llama_index.bridge.pydantic.Field', 'Field', ([], {'default': '(False)', 'description': '"""Whether to print verbose output."""'}), "(default=False, description='Whether to print verbose output.')\n", (863, 926), False, 'from llama_index.bridge.pydantic import Field, PrivateAttr\n'), ((974, 987), 'llama_... |
from byzerllm.utils.client import ByzerLLM
from byzerllm.utils.retrieval import ByzerRetrieval
from byzerllm.apps.llama_index.byzerai import ByzerAI
from byzerllm.apps.llama_index.byzerai_embedding import ByzerAIEmbedding
from byzerllm.apps.llama_index.byzerai_docstore import ByzerAIDocumentStore
from byzerllm.apps.lla... | [
"llama_index.storage.StorageContext.from_defaults"
] | [((1041, 1129), 'byzerllm.apps.llama_index.byzerai_vectordb.ByzerAIVectorStore', 'ByzerAIVectorStore', ([], {'llm': 'llm', 'retrieval': 'retrieval', 'chunk_collection': 'chunk_collection'}), '(llm=llm, retrieval=retrieval, chunk_collection=\n chunk_collection)\n', (1059, 1129), False, 'from byzerllm.apps.llama_index... |
#model_settings.py
import streamlit as st
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from llama_index import LangchainEmbedding, LLMPredictor, PromptHelper, OpenAIEmbedding, ServiceContext
from llama_index.logger import LlamaLogger
from langchain.chat_models import ChatOpenAI
from langchain imp... | [
"llama_index.ServiceContext.from_defaults",
"llama_index.OpenAIEmbedding",
"llama_index.logger.LlamaLogger",
"llama_index.PromptHelper"
] | [((705, 751), 'streamlit.selectbox', 'st.selectbox', (['"""Sentence transformer:"""', 'options'], {}), "('Sentence transformer:', options)\n", (717, 751), True, 'import streamlit as st\n'), ((1220, 1279), 'llama_index.PromptHelper', 'PromptHelper', (['max_input_size', 'num_output', 'max_chunk_overlap'], {}), '(max_inpu... |
from typing import Any, List, Optional, Sequence
from llama_index.core.base.base_query_engine import BaseQueryEngine
from llama_index.core.base.base_retriever import BaseRetriever
from llama_index.core.base.response.schema import RESPONSE_TYPE
from llama_index.core.callbacks.base import CallbackManager
from llama_inde... | [
"llama_index.core.prompts.PromptTemplate",
"llama_index.core.settings.callback_manager_from_settings_or_context",
"llama_index.core.node_parser.SentenceSplitter",
"llama_index.core.response_synthesizers.get_response_synthesizer",
"llama_index.core.schema.TextNode.parse_obj",
"llama_index.core.settings.llm... | [((1182, 1924), 'llama_index.core.prompts.PromptTemplate', 'PromptTemplate', (['"""Please provide an answer based solely on the provided sources. When referencing information from a source, cite the appropriate source(s) using their corresponding numbers. Every answer should include at least one source citation. Only c... |
"""
# My first app
Here's our first attempt at using data to create a table:
"""
import logging
import sys
import streamlit as st
from clickhouse_connect import common
from llama_index.core.settings import Settings
from llama_index.embeddings.fastembed import FastEmbedEmbedding
from llama_index.llms.openai import Open... | [
"llama_index.core.SQLDatabase",
"llama_index.llms.openai.OpenAI",
"llama_index.core.VectorStoreIndex.from_vector_store",
"llama_index.core.tools.QueryEngineTool.from_defaults",
"llama_index.core.vector_stores.types.MetadataInfo",
"llama_index.core.PromptTemplate",
"llama_index.embeddings.fastembed.FastE... | [((1100, 1158), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (1119, 1158), False, 'import logging\n'), ((1713, 1957), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""Get summaries of Hacker ... |
import chromadb
import openai
from dotenv import load_dotenv
from langchain.chat_models import ChatOpenAI
load_dotenv()
from llama_index.llms import OpenAI
from llama_index import VectorStoreIndex, ServiceContext
from llama_index.vector_stores import ChromaVectorStore
import os
OPENAI_API_KEY = os.getenv('OPENAI_API_... | [
"llama_index.ServiceContext.from_defaults",
"llama_index.VectorStoreIndex.from_vector_store",
"llama_index.vector_stores.ChromaVectorStore"
] | [((107, 120), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (118, 120), False, 'from dotenv import load_dotenv\n'), ((298, 325), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (307, 325), False, 'import os\n'), ((390, 434), 'chromadb.PersistentClient', 'chromadb.PersistentCli... |
import tempfile
import llama_index
from llama_index import SimpleDirectoryReader
import aiohttp
from llama_index.readers.web import DEFAULT_WEBSITE_EXTRACTOR
from models.statics_model import ResponseStatics, g_index, file_extensions_mappings
def upload_doc_handler(knowledgebase_id, file):
if not knowledgebase_... | [
"llama_index.BeautifulSoupWebReader",
"llama_index.SimpleDirectoryReader"
] | [((615, 671), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'delete': '(False)', 'suffix': 'suffix'}), '(delete=False, suffix=suffix)\n', (642, 671), False, 'import tempfile\n'), ((1063, 1086), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (1084, 1086), False, 'import aiohttp... |
import os
from dotenv import load_dotenv
from llama_index import SimpleDirectoryReader, GPTSimpleVectorIndex, LLMPredictor
from langchain.chat_models import ChatOpenAI
load_dotenv()
os.environ['OPENAI_API_KEY'] = os.getenv('OPENAI_KEY')
def tune_llm(input_directory="sourcedata", output_file="indexdata/index.json"):
... | [
"llama_index.GPTSimpleVectorIndex",
"llama_index.SimpleDirectoryReader"
] | [((169, 182), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (180, 182), False, 'from dotenv import load_dotenv\n'), ((215, 238), 'os.getenv', 'os.getenv', (['"""OPENAI_KEY"""'], {}), "('OPENAI_KEY')\n", (224, 238), False, 'import os\n'), ((506, 571), 'llama_index.GPTSimpleVectorIndex', 'GPTSimpleVectorIndex', ... |
from ..conversable_agent import ConversableAgent
from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union
from ....utils.client import ByzerLLM
from byzerllm.utils.retrieval import ByzerRetrieval
from ..agent import Agent
import ray
from ray.util.client.common import ClientActorHandle, ClientObjectRef... | [
"llama_index.VectorStoreIndex.from_vector_store",
"llama_index.query_engine.SubQuestionQueryEngine.from_defaults",
"llama_index.tools.ToolMetadata"
] | [((2438, 2462), 'byzerllm.apps.llama_index.get_service_context', 'get_service_context', (['llm'], {}), '(llm)\n', (2457, 2462), False, 'from byzerllm.apps.llama_index import get_service_context, get_storage_context\n'), ((2494, 2529), 'byzerllm.apps.llama_index.get_storage_context', 'get_storage_context', (['llm', 'ret... |
# Copyright 2023 osiworx
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
#... | [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.storage.storage_context.StorageContext.from_defaults",
"llama_index.SimpleDirectoryReader",
"llama_index.ServiceContext.from_defaults",
"llama_index.vector_stores.qdrant.QdrantVectorStore",
"llama_index.embeddings.HuggingFaceEmbedding"
] | [((905, 960), 'qdrant_client.QdrantClient', 'qdrant_client.QdrantClient', ([], {'url': '"""http://localhost:6333"""'}), "(url='http://localhost:6333')\n", (931, 960), False, 'import qdrant_client\n'), ((1352, 1426), 'llama_index.embeddings.HuggingFaceEmbedding', 'HuggingFaceEmbedding', ([], {'model_name': '"""sentence-... |
from typing import Union, Optional, List
from llama_index.chat_engine.types import BaseChatEngine, ChatMode
from llama_index.embeddings.utils import EmbedType
from llama_index.chat_engine import ContextChatEngine
from llama_index.memory import ChatMemoryBuffer
from lyzr.base.llm import LyzrLLMFactory
from lyzr.base.s... | [
"llama_index.memory.ChatMemoryBuffer.from_defaults"
] | [((1242, 1430), 'lyzr.utils.document_reading.read_pdf_as_documents', 'read_pdf_as_documents', ([], {'input_dir': 'input_dir', 'input_files': 'input_files', 'exclude_hidden': 'exclude_hidden', 'filename_as_id': 'filename_as_id', 'recursive': 'recursive', 'required_exts': 'required_exts'}), '(input_dir=input_dir, input_f... |
import json
from util import rm_file
from tqdm import tqdm
import argparse
from copy import deepcopy
import os
from util import JSONReader
import openai
from typing import List, Dict
from llama_index import (
ServiceContext,
OpenAIEmbedding,
PromptHelper,
VectorStoreIndex,
set_global_service_cont... | [
"llama_index.embeddings.cohereai.CohereEmbedding",
"llama_index.embeddings.VoyageEmbedding",
"llama_index.ServiceContext.from_defaults",
"llama_index.OpenAIEmbedding",
"llama_index.llms.OpenAI",
"llama_index.ingestion.IngestionPipeline",
"llama_index.set_global_service_context",
"llama_index.schema.Qu... | [((1340, 1395), 'os.environ.get', 'os.environ.get', (['"""OPENAI_API_KEY"""', '"""your_openai_api_key"""'], {}), "('OPENAI_API_KEY', 'your_openai_api_key')\n", (1354, 1395), False, 'import os\n'), ((1455, 1510), 'os.environ.get', 'os.environ.get', (['"""VOYAGE_API_KEY"""', '"""your_voyage_api_key"""'], {}), "('VOYAGE_A... |
import pinecone
import torch
import numpy as np
import torchvision.transforms as T
from PIL import Image
import os
import tqdm
import openai
import hashlib
import io
from gradio_client import Client
from monitor import Monitor, monitoring
from llama_index.vector_stores import PineconeVectorStore
from llama_index import... | [
"llama_index.VectorStoreIndex.from_vector_store",
"llama_index.schema.TextNode",
"llama_index.vector_stores.PineconeVectorStore"
] | [((945, 950), 'trulens_eval.Tru', 'Tru', ([], {}), '()\n', (948, 950), False, 'from trulens_eval import Feedback, Tru, TruLlama\n'), ((1012, 1020), 'trulens_eval.feedback.provider.openai.OpenAI', 'OpenAI', ([], {}), '()\n', (1018, 1020), False, 'from trulens_eval.feedback.provider.openai import OpenAI\n'), ((1697, 1746... |
############################################################################################################################
# In this section, we set the user authentication, model URL, and prompt text. Alternatively, set the user and app ID,
# and model name. Change these strings to run your own example.
############... | [
"llama_index.embeddings.clarifai.ClarifaiEmbedding"
] | [((1158, 1196), 'llama_index.embeddings.clarifai.ClarifaiEmbedding', 'ClarifaiEmbedding', ([], {'model_url': 'MODEL_URL'}), '(model_url=MODEL_URL)\n', (1175, 1196), False, 'from llama_index.embeddings.clarifai import ClarifaiEmbedding\n')] |
# Copyright 2023 Qarik Group, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writ... | [
"llama_index.SimpleDirectoryReader",
"llama_index.ServiceContext.from_defaults",
"llama_index.query_engine.transform_query_engine.TransformQueryEngine",
"llama_index.StorageContext.from_defaults",
"llama_index.indices.query.query_transform.base.DecomposeQueryTransform",
"llama_index.load_index_from_storag... | [((1710, 1726), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (1724, 1726), False, 'import threading\n'), ((1794, 1810), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (1808, 1810), False, 'import threading\n'), ((1950, 1991), 'common.solution.getenv', 'solution.getenv', (['"""EMBEDDINGS_BUCKET_NAME"""']... |
import os
from dotenv import load_dotenv, find_dotenv
import numpy as np
from trulens_eval import (
Feedback,
TruLlama,
OpenAI
)
from trulens_eval.feedback import Groundedness
import nest_asyncio
nest_asyncio.apply()
def get_openai_api_key():
_ = load_dotenv(find_dotenv())
return os.getenv("O... | [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.retrievers.AutoMergingRetriever",
"llama_index.node_parser.HierarchicalNodeParser.from_defaults",
"llama_index.VectorStoreIndex",
"llama_index.indices.postprocessor.SentenceTransformerRerank",
"llama_index.node_parser.SentenceWindowNodeParser.fro... | [((212, 232), 'nest_asyncio.apply', 'nest_asyncio.apply', ([], {}), '()\n', (230, 232), False, 'import nest_asyncio\n'), ((450, 458), 'trulens_eval.OpenAI', 'OpenAI', ([], {}), '()\n', (456, 458), False, 'from trulens_eval import Feedback, TruLlama, OpenAI\n'), ((855, 897), 'trulens_eval.feedback.Groundedness', 'Ground... |
import tiktoken
import sys
from llama_index.readers.file import PyMuPDFReader
from llama_index.core.node_parser import TokenTextSplitter
index = int(sys.argv[1])
docs = PyMuPDFReader().load("Hamlet.pdf")
combined = ""
for doc in docs:
combined += doc.text
splitter = TokenTextSplitter(
chunk_size=10000,
c... | [
"llama_index.readers.file.PyMuPDFReader"
] | [((495, 506), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (503, 506), False, 'import sys\n'), ((170, 185), 'llama_index.readers.file.PyMuPDFReader', 'PyMuPDFReader', ([], {}), '()\n', (183, 185), False, 'from llama_index.readers.file import PyMuPDFReader\n'), ((351, 387), 'tiktoken.encoding_for_model', 'tiktoken.en... |
# The MIT License
# Copyright (c) Jerry Liu
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publi... | [
"llama_index.schema.Document"
] | [((14702, 14755), 'logging.debug', 'log.debug', (['"""downloading file using OpenDAL: %s"""', 'path'], {}), "('downloading file using OpenDAL: %s', path)\n", (14711, 14755), True, 'import logging as log\n'), ((14765, 14796), 'typing.cast', 'cast', (['opendal.AsyncOperator', 'op'], {}), '(opendal.AsyncOperator, op)\n', ... |
from langchain.agents import (
initialize_agent,
Tool,
AgentType
)
from llama_index.callbacks import (
CallbackManager,
LlamaDebugHandler
)
from llama_index.node_parser.simple import SimpleNodeParser
from llama_index import (
VectorStoreIndex,
SummaryIndex,
SimpleDirectoryReader,
ServiceConte... | [
"llama_index.SimpleDirectoryReader",
"llama_index.callbacks.LlamaDebugHandler",
"llama_index.StorageContext.from_defaults",
"llama_index.embeddings.OpenAIEmbedding",
"llama_index.VectorStoreIndex",
"llama_index.callbacks.CallbackManager",
"llama_index.SummaryIndex",
"llama_index.node_parser.simple.Sim... | [((398, 456), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (417, 456), False, 'import logging\n'), ((529, 545), 'os.getenv', 'os.getenv', (['"""LLM"""'], {}), "('LLM')\n", (538, 545), False, 'import os\n'), ((1217, 12... |
from llama_index import DiscordReader
from llama_index import download_loader
import os
import nest_asyncio
nest_asyncio.apply()
from llama_index import ServiceContext
import openai
import re
import csv
import time
import random
from dotenv import load_dotenv
import os
from llama_index import Document
load_dotenv()
... | [
"llama_index.ServiceContext.from_defaults",
"llama_index.DiscordReader",
"llama_index.download_loader",
"llama_index.Document"
] | [((108, 128), 'nest_asyncio.apply', 'nest_asyncio.apply', ([], {}), '()\n', (126, 128), False, 'import nest_asyncio\n'), ((304, 317), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (315, 317), False, 'from dotenv import load_dotenv\n'), ((337, 365), 'os.environ.get', 'os.environ.get', (['"""OPENAI_API"""'], {})... |
from typing import Union
from llama_index.core import Prompt
from llama_index.core.response_synthesizers import get_response_synthesizer, ResponseMode
from llama_index.core.postprocessor import SimilarityPostprocessor
from llama_index.core.llms import ChatMessage, MessageRole
from llama_index.agent.openai import OpenAI... | [
"llama_index.llms.openai.OpenAI",
"llama_index.core.llms.ChatMessage",
"llama_index.core.response_synthesizers.get_response_synthesizer",
"llama_index.core.Prompt",
"llama_index.agent.openai.OpenAIAgent.from_tools",
"llama_index.core.postprocessor.SimilarityPostprocessor"
] | [((2418, 2434), 'app.llama_index_server.chat_message_dao.ChatMessageDao', 'ChatMessageDao', ([], {}), '()\n', (2432, 2434), False, 'from app.llama_index_server.chat_message_dao import ChatMessageDao\n'), ((3036, 3057), 'app.llama_index_server.index_storage.index_storage.index', 'index_storage.index', ([], {}), '()\n', ... |
from typing import List
from fastapi.responses import StreamingResponse
from app.utils.json import json_to_model
from app.utils.index import get_agent
from fastapi import APIRouter, Depends, HTTPException, Request, status
from llama_index.llms.base import MessageRole, ChatMessage
from llama_index.agent import OpenAIA... | [
"llama_index.llms.base.ChatMessage"
] | [((390, 401), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (399, 401), False, 'from fastapi import APIRouter, Depends, HTTPException, Request, status\n'), ((809, 827), 'fastapi.Depends', 'Depends', (['get_agent'], {}), '(get_agent)\n', (816, 827), False, 'from fastapi import APIRouter, Depends, HTTPException, Re... |
import streamlit as st
from llama_index import VectorStoreIndex, ServiceContext, Document
from llama_index.llms import OpenAI
import openai
from llama_index import SimpleDirectoryReader
st.set_page_config(page_title="Converse com Resoluções do Bacen, powered by LlamaIndex", page_icon="🦙", layout="centered", initial_s... | [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.SimpleDirectoryReader",
"llama_index.llms.OpenAI"
] | [((187, 366), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""Converse com Resoluções do Bacen, powered by LlamaIndex"""', 'page_icon': '"""🦙"""', 'layout': '"""centered"""', 'initial_sidebar_state': '"""auto"""', 'menu_items': 'None'}), "(page_title=\n 'Converse com Resoluções do Bacen, ... |
"""Agent utils."""
from llama_index.core.agent.types import TaskStep
from llama_index.core.base.llms.types import ChatMessage, MessageRole
from llama_index.core.memory import BaseMemory
def add_user_step_to_memory(
step: TaskStep, memory: BaseMemory, verbose: bool = False
) -> None:
"""Add user step to memor... | [
"llama_index.core.base.llms.types.ChatMessage"
] | [((345, 399), 'llama_index.core.base.llms.types.ChatMessage', 'ChatMessage', ([], {'content': 'step.input', 'role': 'MessageRole.USER'}), '(content=step.input, role=MessageRole.USER)\n', (356, 399), False, 'from llama_index.core.base.llms.types import ChatMessage, MessageRole\n')] |
from llama_index.core.tools import FunctionTool
def calculate_average(*values):
"""
Calculates the average of the provided values.
"""
return sum(values) / len(values)
average_tool = FunctionTool.from_defaults(
fn=calculate_average
)
| [
"llama_index.core.tools.FunctionTool.from_defaults"
] | [((200, 248), 'llama_index.core.tools.FunctionTool.from_defaults', 'FunctionTool.from_defaults', ([], {'fn': 'calculate_average'}), '(fn=calculate_average)\n', (226, 248), False, 'from llama_index.core.tools import FunctionTool\n')] |
from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, ServiceContext, Document
def load_knowledge() -> list[Document]:
# Load data from directory
documents = SimpleDirectoryReader('knowledge').load_data()
return documents
def create_index() -> GPTVectorStoreIndex:
print('Creating new i... | [
"llama_index.ServiceContext.from_defaults",
"llama_index.GPTVectorStoreIndex.from_documents",
"llama_index.SimpleDirectoryReader",
"llama_index.GPTVectorStoreIndex.load_from_disk"
] | [((432, 483), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'chunk_size_limit': '(3000)'}), '(chunk_size_limit=3000)\n', (460, 483), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, ServiceContext, Document\n'), ((496, 574), 'llama_index.GPTVectorStoreIndex... |
import logging
import streamlit as st
from llama_index import (
OpenAIEmbedding,
ServiceContext,
SimpleDirectoryReader,
VectorStoreIndex,
)
from llama_index.llms import OpenAI
from streamlit_examples.utils.theme import initPage
from streamlit_examples.utils.streamlit import cache_file, upload_files
in... | [
"llama_index.ServiceContext.from_defaults",
"llama_index.OpenAIEmbedding",
"llama_index.llms.OpenAI",
"llama_index.SimpleDirectoryReader"
] | [((318, 339), 'streamlit_examples.utils.theme.initPage', 'initPage', (['"""QueryPDFs"""'], {}), "('QueryPDFs')\n", (326, 339), False, 'from streamlit_examples.utils.theme import initPage\n'), ((340, 466), 'streamlit.write', 'st.write', (['"""Ask questions or create summaries or explanations on PDFs using [LlamaIndex](h... |
#ingest uploaded documents
from global_settings import STORAGE_PATH, INDEX_STORAGE, CACHE_FILE
from logging_functions import log_action
from llama_index.core import SimpleDirectoryReader, VectorStoreIndex
from llama_index.core.ingestion import IngestionPipeline, IngestionCache
from llama_index.core.node_parser import T... | [
"llama_index.core.ingestion.IngestionCache.from_persist_path",
"llama_index.core.node_parser.TokenTextSplitter",
"llama_index.core.extractors.SummaryExtractor",
"llama_index.core.SimpleDirectoryReader",
"llama_index.embeddings.openai.OpenAIEmbedding"
] | [((644, 711), 'logging_functions.log_action', 'log_action', (['f"""File \'{doc.id_}\' uploaded user"""'], {'action_type': '"""UPLOAD"""'}), '(f"File \'{doc.id_}\' uploaded user", action_type=\'UPLOAD\')\n', (654, 711), False, 'from logging_functions import log_action\n'), ((786, 830), 'llama_index.core.ingestion.Ingest... |
import tiktoken
from llama_index.core import TreeIndex, SimpleDirectoryReader, Settings
from llama_index.core.llms.mock import MockLLM
from llama_index.core.callbacks import CallbackManager, TokenCountingHandler
llm = MockLLM(max_tokens=256)
token_counter = TokenCountingHandler(
tokenizer=tiktoken.encoding_for_mod... | [
"llama_index.core.SimpleDirectoryReader",
"llama_index.core.callbacks.CallbackManager",
"llama_index.core.TreeIndex.from_documents",
"llama_index.core.llms.mock.MockLLM"
] | [((219, 242), 'llama_index.core.llms.mock.MockLLM', 'MockLLM', ([], {'max_tokens': '(256)'}), '(max_tokens=256)\n', (226, 242), False, 'from llama_index.core.llms.mock import MockLLM\n'), ((368, 400), 'llama_index.core.callbacks.CallbackManager', 'CallbackManager', (['[token_counter]'], {}), '([token_counter])\n', (383... |
import torch
from langchain.llms.base import LLM
from llama_index import SimpleDirectoryReader, LangchainEmbedding, GPTListIndex, PromptHelper
from llama_index import LLMPredictor, ServiceContext
from transformers import pipeline
from typing import Optional, List, Mapping, Any
"""
To use a custom LLM model, you only need to implement the LLM class from Langchain. You... | [
"llama_index.ServiceContext.from_defaults",
"llama_index.SimpleDirectoryReader",
"llama_index.GPTListIndex.from_documents",
"llama_index.PromptHelper"
] | [((616, 675), 'llama_index.PromptHelper', 'PromptHelper', (['max_input_size', 'num_output', 'max_chunk_overlap'], {}), '(max_input_size, num_output, max_chunk_overlap)\n', (628, 675), False, 'from llama_index import SimpleDirectoryReader, LangchainEmbedding, GPTListIndex, PromptHelper\n'), ((1429, 1520), 'llama_index.S... |