code: string (lengths 161 to 233k)
apis: list (lengths 1 to 24)
extract_api: string (lengths 162 to 68.5k)
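Each row pairs a source file (code) with the llama_index entry points it exercises (apis) and a flat list of extracted call records (extract_api). The sketch below is a minimal illustration of how a row with this shape could be consumed; the row itself is a made-up stand-in (a plain Flask snippet), the field names come from the column summary above, and the assumption that extract_api arrives as a Python-literal string is inferred from how the values are rendered here.

    import ast

    # Stand-in row shaped like the columns above; the values are illustrative only.
    row = {
        "code": "from flask import Flask\napp = Flask(__name__)\n",
        "apis": ["flask.Flask"],
        "extract_api": "[((30, 45), 'flask.Flask', 'Flask', (['__name__'], {}), "
                       "'(__name__)', (35, 45), False, 'from flask import Flask')]",
    }

    # extract_api is built from Python literals, so ast.literal_eval can parse it.
    records = ast.literal_eval(row["extract_api"])
    called = [rec[1] for rec in records]  # fully qualified names, e.g. 'flask.Flask'
    print(called)

    # In the example rows below, the apis column appears to keep only llama_index.*
    # names (empty for this Flask-only stand-in).
    print(sorted({name for name in called if name.startswith("llama_index")}))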
from io import BytesIO from flask import Flask, jsonify import os # import tweepy from dotenv import load_dotenv from flask import request,jsonify import snscrape.modules.twitter as snstwitter import requests from goose3 import Goose from wordcloud import WordCloud, STOPWORDS import plotly.graph_objs as go import json ...
[ "llama_index.GPTVectorStoreIndex.from_documents" ]
[((1520, 1535), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (1525, 1535), False, 'from flask import Flask, jsonify\n'), ((1536, 1545), 'flask_cors.CORS', 'CORS', (['app'], {}), '(app)\n', (1540, 1545), False, 'from flask_cors import CORS\n'), ((1613, 1626), 'dotenv.load_dotenv', 'load_dotenv', ([], {}),...
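Reading the first record above (the flask.Flask entry), the leading integer pair looks like character offsets of the whole call expression inside code, the later pair like offsets of just the argument text, and the final string records the import that makes the name resolvable; the boolean's meaning is not clear from this dump. That reading is an inference from the examples, so the check below uses a constructed snippet with offsets computed on the fly rather than the real row.

    # Constructed illustration (not a dataset row): sanity-check the offset reading by slicing.
    code = "from flask import Flask\napp = Flask(__name__)\n"

    call_text = "Flask(__name__)"
    args_text = "(__name__)"
    call_start = code.find(call_text)
    call_span = (call_start, call_start + len(call_text))
    args_start = code.find(args_text)
    args_span = (args_start, args_start + len(args_text))

    # Same shape as one extract_api entry; the boolean flag's meaning is not documented here.
    record = (call_span, "flask.Flask", "Flask", (["__name__"], {}),
              args_text, args_span, False, "from flask import Flask\n")

    span, api, name, signature, arg_text, arg_span, flag, import_stmt = record
    assert code[span[0]:span[1]] == call_text         # full call expression
    assert code[arg_span[0]:arg_span[1]] == arg_text   # argument text only
    print(api, "->", code[span[0]:span[1]])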
import sqlite3 import pandas as pd import llama_index import os import openai from IPython.display import Markdown, display from sqlalchemy import ( create_engine, MetaData, Table, Column, String, Integer, select, text ) from llama_index import SQLDatabase, ServiceContext from llama_inde...
[ "llama_index.ServiceContext.from_defaults", "llama_index.llms.OpenAI", "llama_index.SQLDatabase", "llama_index.indices.struct_store.sql_query.NLSQLTableQueryEngine" ]
[((1122, 1158), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0)', 'model': '"""gpt-4"""'}), "(temperature=0, model='gpt-4')\n", (1128, 1158), False, 'from llama_index.llms import OpenAI\n'), ((1168, 1210), 'sqlalchemy.create_engine', 'create_engine', (['"""sqlite:///company_info.db"""'], {}), "('sqlite:/...
import os from typing import Optional, Dict import openai import pandas as pd from langchain.llms import OpenAI import llama_index from llama_index.readers.schema.base import Document from llama_index import SimpleWebPageReader, QuestionAnswerPrompt from llama_index import ServiceContext, StorageContext, load_index_fr...
[ "llama_index.SimpleWebPageReader", "llama_index.LLMPredictor", "llama_index.OpenAIEmbedding", "llama_index.StorageContext.from_defaults", "llama_index.QuestionAnswerPrompt", "llama_index.load_index_from_storage", "llama_index.readers.schema.base.Document" ]
[((6636, 6688), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'index_path'}), '(persist_dir=index_path)\n', (6664, 6688), False, 'from llama_index import ServiceContext, StorageContext, load_index_from_storage\n'), ((6759, 6832), 'llama_index.load_index_from_storage', ...
""" This script is used to summarize conversations from Zendesk support tickets. It reads text files containing comments from the ticket and generates a summary that includes information about the participants, problems raised, key events, current status of the ticket, and log lines from the messages. The script uses ...
[ "llama_index.core.SimpleDirectoryReader", "llama_index.llms.gemini.Gemini", "llama_index.core.response_synthesizers.TreeSummarize", "llama_index.core.ServiceContext.from_defaults" ]
[((1113, 1152), 'os.makedirs', 'os.makedirs', (['SUMMARY_DIR'], {'exist_ok': '(True)'}), '(SUMMARY_DIR, exist_ok=True)\n', (1124, 1152), False, 'import os\n'), ((2083, 2091), 'llama_index.llms.gemini.Gemini', 'Gemini', ([], {}), '()\n', (2089, 2091), False, 'from llama_index.llms.gemini import Gemini\n'), ((2110, 2168)...
"""Google GenerativeAI Attributed Question and Answering (AQA) service. The GenAI Semantic AQA API is a managed end to end service that allows developers to create responses grounded on specified passages based on a user query. For more information visit: https://developers.generativeai.google/guide """ import loggin...
[ "llama_index.legacy.core.response.schema.Response", "llama_index.legacy.schema.TextNode", "llama_index.legacy.indices.query.schema.QueryBundle", "llama_index.legacy.vector_stores.google.generativeai.genai_extension.build_generative_service" ]
[((1114, 1141), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1131, 1141), False, 'import logging\n'), ((2809, 2842), 'llama_index.legacy.vector_stores.google.generativeai.genai_extension.build_generative_service', 'genaix.build_generative_service', ([], {}), '()\n', (2840, 2842), True,...
"""Elasticsearch vector store.""" import asyncio import uuid from logging import getLogger from typing import Any, Callable, Dict, List, Literal, Optional, Union, cast import nest_asyncio import numpy as np from llama_index.legacy.bridge.pydantic import PrivateAttr from llama_index.legacy.schema import BaseNode, Met...
[ "llama_index.legacy.bridge.pydantic.PrivateAttr", "llama_index.legacy.schema.TextNode", "llama_index.legacy.vector_stores.utils.node_to_metadata_dict", "llama_index.legacy.vector_stores.utils.metadata_dict_to_node" ]
[((640, 659), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (649, 659), False, 'from logging import getLogger\n'), ((2343, 2396), 'elasticsearch.AsyncElasticsearch', 'elasticsearch.AsyncElasticsearch', ([], {}), '(**connection_params)\n', (2375, 2396), False, 'import elasticsearch\n'), ((3719, 3...
# You can find this code for Chainlit python streaming here (https://docs.chainlit.io/concepts/streaming/python) import os import requests import chainlit as cl from dotenv import load_dotenv import llama_index from llama_index.core import set_global_handler from llama_index.llms.openai import OpenAI from llama_inde...
[ "llama_index.core.VectorStoreIndex.from_documents", "llama_index.llms.openai.OpenAI", "llama_index.vector_stores.faiss.FaissVectorStore", "llama_index.core.StorageContext.from_defaults", "llama_index.core.load_index_from_storage", "llama_index.core.set_global_handler", "llama_index.core.SimpleDirectoryR...
[((1242, 1255), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (1253, 1255), False, 'from dotenv import load_dotenv\n'), ((1261, 1346), 'llama_index.core.set_global_handler', 'set_global_handler', (['"""wandb"""'], {'run_args': "{'project': 'aie1-llama-index-middleterm'}"}), "('wandb', run_args={'project': 'aie...
""" This script is used to summarize conversations from Zendesk support tickets. It reads text files containing comments from the ticket and generates a summary that includes information about the participants, problems raised, key events, current status of the ticket, and log lines from the messages. The script uses ...
[ "llama_index.core.SimpleDirectoryReader", "llama_index.llms.ollama.Ollama", "llama_index.core.response_synthesizers.TreeSummarize", "llama_index.core.ServiceContext.from_defaults" ]
[((1201, 1240), 'os.makedirs', 'os.makedirs', (['SUMMARY_DIR'], {'exist_ok': '(True)'}), '(SUMMARY_DIR, exist_ok=True)\n', (1212, 1240), False, 'import os\n'), ((2215, 2263), 'llama_index.llms.ollama.Ollama', 'Ollama', ([], {'model': 'MODEL', 'request_timeout': 'TIMEOUT_SEC'}), '(model=MODEL, request_timeout=TIMEOUT_SE...
"""Response builder class. This class provides general functions for taking in a set of text and generating a response. Will support different modes, from 1) stuffing chunks into prompt, 2) create and refine separately over each chunk, 3) tree summarization. """ import logging from abc import abstractmethod from ty...
[ "llama_index.core.indices.prompt_helper.PromptHelper.from_llm_metadata", "llama_index.core.base.response.schema.StreamingResponse", "llama_index.core.base.query_pipeline.query.InputKeys.from_keys", "llama_index.core.base.query_pipeline.query.OutputKeys.from_keys", "llama_index.core.base.response.schema.Resp...
[((1679, 1714), 'llama_index.core.instrumentation.get_dispatcher', 'instrument.get_dispatcher', (['__name__'], {}), '(__name__)\n', (1704, 1714), True, 'import llama_index.core.instrumentation as instrument\n'), ((1725, 1752), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1742, 1752), F...
""" This script is used to summarize conversations from Zendesk support tickets. It reads text files containing comments from the ticket and generates a summary that includes information about the participants, problems raised, key events, current status of the ticket, and log lines from the messages. The script uses ...
[ "llama_index.core.SimpleDirectoryReader", "llama_index.core.ServiceContext.from_defaults", "llama_index.llms.anthropic.Anthropic", "llama_index.core.response_synthesizers.TreeSummarize" ]
[((1211, 1250), 'os.makedirs', 'os.makedirs', (['SUMMARY_DIR'], {'exist_ok': '(True)'}), '(SUMMARY_DIR, exist_ok=True)\n', (1222, 1250), False, 'import os\n'), ((2181, 2220), 'llama_index.llms.anthropic.Anthropic', 'Anthropic', ([], {'model': 'MODEL', 'max_tokens': '(1024)'}), '(model=MODEL, max_tokens=1024)\n', (2190,...
"""Google Generative AI Vector Store. The GenAI Semantic Retriever API is a managed end-to-end service that allows developers to create a corpus of documents to perform semantic search on related passages given a user query. For more information visit: https://developers.generativeai.google/guide """ import logging i...
[ "llama_index.vector_stores.google.genai_extension.build_semantic_retriever", "llama_index.vector_stores.google.genai_extension.get_corpus", "llama_index.vector_stores.google.genai_extension.Config", "llama_index.vector_stores.google.genai_extension.EntityName.from_str", "llama_index.core.bridge.pydantic.Fie...
[((812, 839), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (829, 839), False, 'import logging\n'), ((2859, 2881), 'llama_index.vector_stores.google.genai_extension.Config', 'genaix.Config', ([], {}), '(**attrs)\n', (2872, 2881), True, 'import llama_index.vector_stores.google.genai_exten...
from typing import Any from llama_index.core.callbacks.base_handler import BaseCallbackHandler from llama_index.core.callbacks.simple_llm_handler import SimpleLLMHandler def set_global_handler(eval_mode: str, **eval_params: Any) -> None: """Set global eval handlers.""" import llama_index.core llama_inde...
[ "llama_index.callbacks.wandb.WandbCallbackHandler", "llama_index.callbacks.deepeval.deepeval_callback_handler", "llama_index.callbacks.argilla.argilla_callback_handler", "llama_index.callbacks.honeyhive.honeyhive_callback_handler", "llama_index.callbacks.openinference.OpenInferenceCallbackHandler", "llama...
[((941, 976), 'llama_index.callbacks.wandb.WandbCallbackHandler', 'WandbCallbackHandler', ([], {}), '(**eval_params)\n', (961, 976), False, 'from llama_index.callbacks.wandb import WandbCallbackHandler\n'), ((1424, 1467), 'llama_index.callbacks.openinference.OpenInferenceCallbackHandler', 'OpenInferenceCallbackHandler'...
"""Google GenerativeAI Attributed Question and Answering (AQA) service. The GenAI Semantic AQA API is a managed end to end service that allows developers to create responses grounded on specified passages based on a user query. For more information visit: https://developers.generativeai.google/guide """ import loggin...
[ "llama_index.indices.query.schema.QueryBundle", "llama_index.schema.TextNode", "llama_index.vector_stores.google.generativeai.genai_extension.build_generative_service", "llama_index.core.response.schema.Response" ]
[((1051, 1078), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1068, 1078), False, 'import logging\n'), ((2739, 2772), 'llama_index.vector_stores.google.generativeai.genai_extension.build_generative_service', 'genaix.build_generative_service', ([], {}), '()\n', (2770, 2772), True, 'impor...
"""Elasticsearch vector store.""" import asyncio import uuid from logging import getLogger from typing import Any, Callable, Dict, List, Literal, Optional, Union, cast import nest_asyncio import numpy as np from llama_index.bridge.pydantic import PrivateAttr from llama_index.schema import BaseNode, MetadataMode, Tex...
[ "llama_index.vector_stores.utils.metadata_dict_to_node", "llama_index.bridge.pydantic.PrivateAttr", "llama_index.schema.TextNode", "llama_index.vector_stores.utils.node_to_metadata_dict" ]
[((599, 618), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (608, 618), False, 'from logging import getLogger\n'), ((2444, 2497), 'elasticsearch.AsyncElasticsearch', 'elasticsearch.AsyncElasticsearch', ([], {}), '(**connection_params)\n', (2476, 2497), False, 'import elasticsearch\n'), ((3820, 3...
"""Base query engine.""" import logging from abc import abstractmethod from typing import Any, Dict, List, Optional, Sequence from llama_index.core.base.query_pipeline.query import ( ChainableMixin, InputKeys, OutputKeys, QueryComponent, validate_and_convert_stringable, ) from llama_index.core.bas...
[ "llama_index.core.instrumentation.events.query.QueryStartEvent", "llama_index.core.instrumentation.events.query.QueryEndEvent", "llama_index.core.base.query_pipeline.query.InputKeys.from_keys", "llama_index.core.base.query_pipeline.query.OutputKeys.from_keys", "llama_index.core.instrumentation.get_dispatche...
[((785, 820), 'llama_index.core.instrumentation.get_dispatcher', 'instrument.get_dispatcher', (['__name__'], {}), '(__name__)\n', (810, 820), True, 'import llama_index.core.instrumentation as instrument\n'), ((830, 857), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (847, 857), False, 'i...
import logging import sys import torch import bentoml import llama_index from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext from llama_index.llms import HuggingFaceLLM import typing as t from typing import Any, List from InstructorEmbedding import INSTRUCTOR from llama_index.bridge.pydan...
[ "llama_index.VectorStoreIndex.from_documents", "llama_index.SimpleDirectoryReader", "llama_index.prompts.PromptTemplate", "llama_index.llms.HuggingFaceLLM", "llama_index.bridge.pydantic.PrivateAttr" ]
[((446, 504), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (465, 504), False, 'import logging\n'), ((3971, 4037), 'bentoml.Service', 'bentoml.Service', (['"""llamaindex_service"""'], {'runners': '[llamaindex_runner]'}...
import llama_index import chromadb from importlib.metadata import version print(f"LlamaIndex version: {version('llama_index')}") print(f"Chroma version: {version('chromadb')}") # Load API key from .env file import os from dotenv import load_dotenv, find_dotenv load_dotenv(find_dotenv()) # Define embe...
[ "llama_index.embeddings.huggingface.HuggingFaceEmbedding", "llama_index.llms.openai.OpenAI", "llama_index.core.VectorStoreIndex", "llama_index.core.postprocessor.MetadataReplacementPostProcessor", "llama_index.core.StorageContext.from_defaults", "llama_index.core.node_parser.SentenceWindowNodeParser.from_...
[((511, 557), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': '"""gpt-3.5-turbo"""', 'temperature': '(0.1)'}), "(model='gpt-3.5-turbo', temperature=0.1)\n", (517, 557), False, 'from llama_index.llms.openai import OpenAI\n'), ((582, 599), 'llama_index.embeddings.openai.OpenAIEmbedding', 'OpenAIEmbedding', ([]...
import os, openai from dotenv import load_dotenv load_dotenv() OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY") openai.api_key = OPENAI_API_KEY # pip install google-search-results from dotenv import load_dotenv from fastapi import FastAPI from fastapi.middleware.cors import CORSMiddleware from pydantic import BaseMo...
[ "llama_index.ServiceContext.from_defaults", "llama_index.indices.loading.load_index_from_storage", "llama_index.set_global_service_context", "llama_index.StorageContext.from_defaults" ]
[((49, 62), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (60, 62), False, 'from dotenv import load_dotenv\n'), ((80, 112), 'os.environ.get', 'os.environ.get', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (94, 112), False, 'import llama_index, os\n'), ((380, 389), 'fastapi.FastAPI', 'FastAPI', ([], ...
"""FastAPI app creation, logger configuration and main API routes.""" import llama_index from private_gpt.di import global_injector from private_gpt.launcher import create_app # Add LlamaIndex simple observability llama_index.set_global_handler("simple") app = create_app(global_injector)
[ "llama_index.set_global_handler" ]
[((217, 257), 'llama_index.set_global_handler', 'llama_index.set_global_handler', (['"""simple"""'], {}), "('simple')\n", (247, 257), False, 'import llama_index\n'), ((265, 292), 'private_gpt.launcher.create_app', 'create_app', (['global_injector'], {}), '(global_injector)\n', (275, 292), False, 'from private_gpt.launc...
""" Astra DB Vector store index. An index based on a DB table with vector search capabilities, powered by the astrapy library """ import json import logging from typing import Any, Dict, List, Optional, cast from warnings import warn import llama_index.core from llama_index.core.bridge.pydantic import PrivateAttr f...
[ "llama_index.core.vector_stores.utils.metadata_dict_to_node", "llama_index.core.vector_stores.utils.node_to_metadata_dict", "llama_index.core.indices.query.embedding_utils.get_top_k_mmr_embeddings", "llama_index.core.bridge.pydantic.PrivateAttr", "llama_index.core.vector_stores.types.VectorStoreQueryResult"...
[((852, 879), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (869, 879), False, 'import logging\n'), ((2070, 2083), 'llama_index.core.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (2081, 2083), False, 'from llama_index.core.bridge.pydantic import PrivateAttr\n'), ((2118, ...
"""Google GenerativeAI Attributed Question and Answering (AQA) service. The GenAI Semantic AQA API is a managed end to end service that allows developers to create responses grounded on specified passages based on a user query. For more information visit: https://developers.generativeai.google/guide """ import loggin...
[ "llama_index.vector_stores.google.genai_extension.build_generative_service", "llama_index.core.schema.TextNode", "llama_index.core.llms.mock.MockLLM", "llama_index.core.base.response.schema.Response", "llama_index.core.indices.query.schema.QueryBundle" ]
[((1057, 1084), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1074, 1084), False, 'import logging\n'), ((2707, 2740), 'llama_index.vector_stores.google.genai_extension.build_generative_service', 'genaix.build_generative_service', ([], {}), '()\n', (2738, 2740), True, 'import llama_index...
from langfuse import Langfuse from llama_index.llms.openai import OpenAI import llama_index.core llama_index.core.set_global_handler("langfuse") from llama_index.core.llms import ChatMessage langfuse = Langfuse() dataset = langfuse.get_dataset("term-extraction") prompt = langfuse.get_prompt("extraction-prompt-1") mod...
[ "llama_index.llms.openai.OpenAI", "llama_index.core.llms.ChatMessage" ]
[((203, 213), 'langfuse.Langfuse', 'Langfuse', ([], {}), '()\n', (211, 213), False, 'from langfuse import Langfuse\n'), ((325, 360), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': '"""gpt-4-turbo-preview"""'}), "(model='gpt-4-turbo-preview')\n", (331, 360), False, 'from llama_index.llms.openai import OpenAI...
from unittest.mock import MagicMock, patch import pytest from llama_index.legacy.core.response.schema import Response from llama_index.legacy.schema import Document try: import google.ai.generativelanguage as genai has_google = True except ImportError: has_google = False from llama_index.legacy.indices....
[ "llama_index.legacy.vector_stores.google.generativeai.genai_extension.get_config", "llama_index.legacy.indices.managed.google.generativeai.GoogleIndex.create_corpus", "llama_index.legacy.indices.managed.google.generativeai.GoogleIndex.from_corpus", "llama_index.legacy.schema.Document", "llama_index.legacy.i...
[((693, 752), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (711, 752), False, 'import pytest\n'), ((754, 798), 'unittest.mock.patch', 'patch', (['"""google.auth.credentials.Credentials"""'], {}), "('google.auth.credent...
from unittest.mock import MagicMock, patch import pytest try: import google.ai.generativelanguage as genai has_google = True except ImportError: has_google = False from llama_index.legacy.response_synthesizers.google.generativeai import ( GoogleTextSynthesizer, set_google_config, ) from llama_in...
[ "llama_index.legacy.response_synthesizers.google.generativeai.GoogleTextSynthesizer.from_defaults", "llama_index.legacy.schema.TextNode", "llama_index.legacy.response_synthesizers.google.generativeai.set_google_config", "llama_index.legacy.vector_stores.google.generativeai.genai_extension.get_config" ]
[((663, 722), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (681, 722), False, 'import pytest\n'), ((724, 768), 'unittest.mock.patch', 'patch', (['"""google.auth.credentials.Credentials"""'], {}), "('google.auth.credent...
"""Global eval handlers.""" from typing import Any from llama_index.callbacks.arize_phoenix_callback import arize_phoenix_callback_handler from llama_index.callbacks.base_handler import BaseCallbackHandler from llama_index.callbacks.deepeval_callback import deepeval_callback_handler from llama_index.callbacks.honeyhi...
[ "llama_index.callbacks.wandb_callback.WandbCallbackHandler", "llama_index.callbacks.honeyhive_callback.honeyhive_callback_handler", "llama_index.callbacks.simple_llm_handler.SimpleLLMHandler", "llama_index.callbacks.arize_phoenix_callback.arize_phoenix_callback_handler", "llama_index.callbacks.promptlayer_h...
[((1068, 1103), 'llama_index.callbacks.wandb_callback.WandbCallbackHandler', 'WandbCallbackHandler', ([], {}), '(**eval_params)\n', (1088, 1103), False, 'from llama_index.callbacks.wandb_callback import WandbCallbackHandler\n'), ((1161, 1204), 'llama_index.callbacks.open_inference_callback.OpenInferenceCallbackHandler'...
"""Global eval handlers.""" from typing import Any from llama_index.callbacks.argilla_callback import argilla_callback_handler from llama_index.callbacks.arize_phoenix_callback import arize_phoenix_callback_handler from llama_index.callbacks.base_handler import BaseCallbackHandler from llama_index.callbacks.deepeval_...
[ "llama_index.callbacks.wandb_callback.WandbCallbackHandler", "llama_index.callbacks.honeyhive_callback.honeyhive_callback_handler", "llama_index.callbacks.simple_llm_handler.SimpleLLMHandler", "llama_index.callbacks.arize_phoenix_callback.arize_phoenix_callback_handler", "llama_index.callbacks.promptlayer_h...
[((1144, 1179), 'llama_index.callbacks.wandb_callback.WandbCallbackHandler', 'WandbCallbackHandler', ([], {}), '(**eval_params)\n', (1164, 1179), False, 'from llama_index.callbacks.wandb_callback import WandbCallbackHandler\n'), ((1237, 1280), 'llama_index.callbacks.open_inference_callback.OpenInferenceCallbackHandler'...
"""Elasticsearch vector store.""" import asyncio import uuid from logging import getLogger from typing import Any, Callable, Dict, List, Literal, Optional, Union, cast import nest_asyncio import numpy as np from llama_index.schema import BaseNode, MetadataMode, TextNode from llama_index.vector_stores.types import ( ...
[ "llama_index.vector_stores.utils.metadata_dict_to_node", "llama_index.schema.TextNode", "llama_index.vector_stores.utils.node_to_metadata_dict" ]
[((534, 553), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (543, 553), False, 'from logging import getLogger\n'), ((2379, 2432), 'elasticsearch.AsyncElasticsearch', 'elasticsearch.AsyncElasticsearch', ([], {}), '(**connection_params)\n', (2411, 2432), False, 'import elasticsearch\n'), ((3755, 3...
"""Elasticsearch vector store.""" import asyncio import uuid from logging import getLogger from typing import Any, Callable, Dict, List, Literal, Optional, Union, cast import nest_asyncio import numpy as np from llama_index.schema import BaseNode, MetadataMode, TextNode from llama_index.vector_stores.types import ( ...
[ "llama_index.vector_stores.utils.metadata_dict_to_node", "llama_index.schema.TextNode", "llama_index.vector_stores.utils.node_to_metadata_dict" ]
[((534, 553), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (543, 553), False, 'from logging import getLogger\n'), ((2379, 2432), 'elasticsearch.AsyncElasticsearch', 'elasticsearch.AsyncElasticsearch', ([], {}), '(**connection_params)\n', (2411, 2432), False, 'import elasticsearch\n'), ((3755, 3...
"""Elasticsearch vector store.""" import asyncio import uuid from logging import getLogger from typing import Any, Callable, Dict, List, Literal, Optional, Union, cast import nest_asyncio import numpy as np from llama_index.schema import BaseNode, MetadataMode, TextNode from llama_index.vector_stores.types import ( ...
[ "llama_index.vector_stores.utils.metadata_dict_to_node", "llama_index.schema.TextNode", "llama_index.vector_stores.utils.node_to_metadata_dict" ]
[((534, 553), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (543, 553), False, 'from logging import getLogger\n'), ((2379, 2432), 'elasticsearch.AsyncElasticsearch', 'elasticsearch.AsyncElasticsearch', ([], {}), '(**connection_params)\n', (2411, 2432), False, 'import elasticsearch\n'), ((3755, 3...
"""Global eval handlers.""" from typing import Any from llama_index.callbacks.arize_phoenix_callback import arize_phoenix_callback_handler from llama_index.callbacks.base_handler import BaseCallbackHandler from llama_index.callbacks.honeyhive_callback import honeyhive_callback_handler from llama_index.callbacks.open_...
[ "llama_index.callbacks.wandb_callback.WandbCallbackHandler", "llama_index.callbacks.honeyhive_callback.honeyhive_callback_handler", "llama_index.callbacks.simple_llm_handler.SimpleLLMHandler", "llama_index.callbacks.arize_phoenix_callback.arize_phoenix_callback_handler", "llama_index.callbacks.promptlayer_h...
[((990, 1025), 'llama_index.callbacks.wandb_callback.WandbCallbackHandler', 'WandbCallbackHandler', ([], {}), '(**eval_params)\n', (1010, 1025), False, 'from llama_index.callbacks.wandb_callback import WandbCallbackHandler\n'), ((1083, 1126), 'llama_index.callbacks.open_inference_callback.OpenInferenceCallbackHandler',...
"""Global eval handlers.""" from typing import Any from llama_index.callbacks.arize_phoenix_callback import arize_phoenix_callback_handler from llama_index.callbacks.base_handler import BaseCallbackHandler from llama_index.callbacks.honeyhive_callback import honeyhive_callback_handler from llama_index.callbacks.open_...
[ "llama_index.callbacks.wandb_callback.WandbCallbackHandler", "llama_index.callbacks.honeyhive_callback.honeyhive_callback_handler", "llama_index.callbacks.simple_llm_handler.SimpleLLMHandler", "llama_index.callbacks.arize_phoenix_callback.arize_phoenix_callback_handler", "llama_index.callbacks.promptlayer_h...
[((990, 1025), 'llama_index.callbacks.wandb_callback.WandbCallbackHandler', 'WandbCallbackHandler', ([], {}), '(**eval_params)\n', (1010, 1025), False, 'from llama_index.callbacks.wandb_callback import WandbCallbackHandler\n'), ((1083, 1126), 'llama_index.callbacks.open_inference_callback.OpenInferenceCallbackHandler',...
"""Global eval handlers.""" from typing import Any from llama_index.callbacks.arize_phoenix_callback import arize_phoenix_callback_handler from llama_index.callbacks.base_handler import BaseCallbackHandler from llama_index.callbacks.honeyhive_callback import honeyhive_callback_handler from llama_index.callbacks.open_...
[ "llama_index.callbacks.wandb_callback.WandbCallbackHandler", "llama_index.callbacks.honeyhive_callback.honeyhive_callback_handler", "llama_index.callbacks.simple_llm_handler.SimpleLLMHandler", "llama_index.callbacks.arize_phoenix_callback.arize_phoenix_callback_handler", "llama_index.callbacks.promptlayer_h...
[((990, 1025), 'llama_index.callbacks.wandb_callback.WandbCallbackHandler', 'WandbCallbackHandler', ([], {}), '(**eval_params)\n', (1010, 1025), False, 'from llama_index.callbacks.wandb_callback import WandbCallbackHandler\n'), ((1083, 1126), 'llama_index.callbacks.open_inference_callback.OpenInferenceCallbackHandler',...
"""Google GenerativeAI Attributed Question and Answering (AQA) service. The GenAI Semantic AQA API is a managed end to end service that allows developers to create responses grounded on specified passages based on a user query. For more information visit: https://developers.generativeai.google/guide """ import loggin...
[ "llama_index.legacy.core.response.schema.Response", "llama_index.legacy.schema.TextNode", "llama_index.legacy.indices.query.schema.QueryBundle", "llama_index.legacy.vector_stores.google.generativeai.genai_extension.build_generative_service" ]
[((1114, 1141), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1131, 1141), False, 'import logging\n'), ((2809, 2842), 'llama_index.legacy.vector_stores.google.generativeai.genai_extension.build_generative_service', 'genaix.build_generative_service', ([], {}), '()\n', (2840, 2842), True,...
"""Google GenerativeAI Attributed Question and Answering (AQA) service. The GenAI Semantic AQA API is a managed end to end service that allows developers to create responses grounded on specified passages based on a user query. For more information visit: https://developers.generativeai.google/guide """ import loggin...
[ "llama_index.legacy.core.response.schema.Response", "llama_index.legacy.schema.TextNode", "llama_index.legacy.indices.query.schema.QueryBundle", "llama_index.legacy.vector_stores.google.generativeai.genai_extension.build_generative_service" ]
[((1114, 1141), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1131, 1141), False, 'import logging\n'), ((2809, 2842), 'llama_index.legacy.vector_stores.google.generativeai.genai_extension.build_generative_service', 'genaix.build_generative_service', ([], {}), '()\n', (2840, 2842), True,...
"""Elasticsearch vector store.""" import asyncio import uuid from logging import getLogger from typing import Any, Callable, Dict, List, Literal, Optional, Union, cast import nest_asyncio import numpy as np from llama_index.legacy.bridge.pydantic import PrivateAttr from llama_index.legacy.schema import BaseNode, Met...
[ "llama_index.legacy.bridge.pydantic.PrivateAttr", "llama_index.legacy.schema.TextNode", "llama_index.legacy.vector_stores.utils.node_to_metadata_dict", "llama_index.legacy.vector_stores.utils.metadata_dict_to_node" ]
[((640, 659), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (649, 659), False, 'from logging import getLogger\n'), ((2343, 2396), 'elasticsearch.AsyncElasticsearch', 'elasticsearch.AsyncElasticsearch', ([], {}), '(**connection_params)\n', (2375, 2396), False, 'import elasticsearch\n'), ((3719, 3...
"""Elasticsearch vector store.""" import asyncio import uuid from logging import getLogger from typing import Any, Callable, Dict, List, Literal, Optional, Union, cast import nest_asyncio import numpy as np from llama_index.legacy.bridge.pydantic import PrivateAttr from llama_index.legacy.schema import BaseNode, Met...
[ "llama_index.legacy.bridge.pydantic.PrivateAttr", "llama_index.legacy.schema.TextNode", "llama_index.legacy.vector_stores.utils.node_to_metadata_dict", "llama_index.legacy.vector_stores.utils.metadata_dict_to_node" ]
[((640, 659), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (649, 659), False, 'from logging import getLogger\n'), ((2343, 2396), 'elasticsearch.AsyncElasticsearch', 'elasticsearch.AsyncElasticsearch', ([], {}), '(**connection_params)\n', (2375, 2396), False, 'import elasticsearch\n'), ((3719, 3...
"""Google Generative AI Vector Store. The GenAI Semantic Retriever API is a managed end-to-end service that allows developers to create a corpus of documents to perform semantic search on related passages given a user query. For more information visit: https://developers.generativeai.google/guide """ import logging i...
[ "llama_index.vector_stores.google.genai_extension.build_semantic_retriever", "llama_index.vector_stores.google.genai_extension.get_corpus", "llama_index.vector_stores.google.genai_extension.Config", "llama_index.vector_stores.google.genai_extension.EntityName.from_str", "llama_index.core.bridge.pydantic.Fie...
[((812, 839), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (829, 839), False, 'import logging\n'), ((2859, 2881), 'llama_index.vector_stores.google.genai_extension.Config', 'genaix.Config', ([], {}), '(**attrs)\n', (2872, 2881), True, 'import llama_index.vector_stores.google.genai_exten...
"""Google GenerativeAI Attributed Question and Answering (AQA) service. The GenAI Semantic AQA API is a managed end to end service that allows developers to create responses grounded on specified passages based on a user query. For more information visit: https://developers.generativeai.google/guide """ import loggin...
[ "llama_index.indices.query.schema.QueryBundle", "llama_index.schema.TextNode", "llama_index.vector_stores.google.generativeai.genai_extension.build_generative_service", "llama_index.core.response.schema.Response" ]
[((1051, 1078), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1068, 1078), False, 'import logging\n'), ((2739, 2772), 'llama_index.vector_stores.google.generativeai.genai_extension.build_generative_service', 'genaix.build_generative_service', ([], {}), '()\n', (2770, 2772), True, 'impor...
"""Google GenerativeAI Attributed Question and Answering (AQA) service. The GenAI Semantic AQA API is a managed end to end service that allows developers to create responses grounded on specified passages based on a user query. For more information visit: https://developers.generativeai.google/guide """ import loggin...
[ "llama_index.indices.query.schema.QueryBundle", "llama_index.schema.TextNode", "llama_index.vector_stores.google.generativeai.genai_extension.build_generative_service", "llama_index.core.response.schema.Response" ]
[((1051, 1078), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1068, 1078), False, 'import logging\n'), ((2739, 2772), 'llama_index.vector_stores.google.generativeai.genai_extension.build_generative_service', 'genaix.build_generative_service', ([], {}), '()\n', (2770, 2772), True, 'impor...
"""FastAPI app creation, logger configuration and main API routes.""" import llama_index from private_gpt.di import global_injector from private_gpt.launcher import create_app # Add LlamaIndex simple observability llama_index.set_global_handler("simple") app = create_app(global_injector)
[ "llama_index.set_global_handler" ]
[((217, 257), 'llama_index.set_global_handler', 'llama_index.set_global_handler', (['"""simple"""'], {}), "('simple')\n", (247, 257), False, 'import llama_index\n'), ((265, 292), 'private_gpt.launcher.create_app', 'create_app', (['global_injector'], {}), '(global_injector)\n', (275, 292), False, 'from private_gpt.launc...
"""FastAPI app creation, logger configuration and main API routes.""" import llama_index from private_gpt.di import global_injector from private_gpt.launcher import create_app # Add LlamaIndex simple observability llama_index.set_global_handler("simple") app = create_app(global_injector)
[ "llama_index.set_global_handler" ]
[((217, 257), 'llama_index.set_global_handler', 'llama_index.set_global_handler', (['"""simple"""'], {}), "('simple')\n", (247, 257), False, 'import llama_index\n'), ((265, 292), 'private_gpt.launcher.create_app', 'create_app', (['global_injector'], {}), '(global_injector)\n', (275, 292), False, 'from private_gpt.launc...
"""FastAPI app creation, logger configuration and main API routes.""" import llama_index from private_gpt.di import global_injector from private_gpt.launcher import create_app # Add LlamaIndex simple observability llama_index.set_global_handler("simple") app = create_app(global_injector)
[ "llama_index.set_global_handler" ]
[((217, 257), 'llama_index.set_global_handler', 'llama_index.set_global_handler', (['"""simple"""'], {}), "('simple')\n", (247, 257), False, 'import llama_index\n'), ((265, 292), 'private_gpt.launcher.create_app', 'create_app', (['global_injector'], {}), '(global_injector)\n', (275, 292), False, 'from private_gpt.launc...
"""FastAPI app creation, logger configuration and main API routes.""" import llama_index from private_gpt.di import global_injector from private_gpt.launcher import create_app # Add LlamaIndex simple observability llama_index.set_global_handler("simple") app = create_app(global_injector)
[ "llama_index.set_global_handler" ]
[((217, 257), 'llama_index.set_global_handler', 'llama_index.set_global_handler', (['"""simple"""'], {}), "('simple')\n", (247, 257), False, 'import llama_index\n'), ((265, 292), 'private_gpt.launcher.create_app', 'create_app', (['global_injector'], {}), '(global_injector)\n', (275, 292), False, 'from private_gpt.launc...
""" Astra DB Vector store index. An index based on a DB table with vector search capabilities, powered by the astrapy library """ import json import logging from typing import Any, Dict, List, Optional, cast from warnings import warn import llama_index.core from llama_index.core.bridge.pydantic import PrivateAttr f...
[ "llama_index.core.vector_stores.utils.metadata_dict_to_node", "llama_index.core.vector_stores.utils.node_to_metadata_dict", "llama_index.core.indices.query.embedding_utils.get_top_k_mmr_embeddings", "llama_index.core.bridge.pydantic.PrivateAttr", "llama_index.core.vector_stores.types.VectorStoreQueryResult"...
[((852, 879), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (869, 879), False, 'import logging\n'), ((2070, 2083), 'llama_index.core.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (2081, 2083), False, 'from llama_index.core.bridge.pydantic import PrivateAttr\n'), ((2118, ...
"""Google GenerativeAI Attributed Question and Answering (AQA) service. The GenAI Semantic AQA API is a managed end to end service that allows developers to create responses grounded on specified passages based on a user query. For more information visit: https://developers.generativeai.google/guide """ import loggin...
[ "llama_index.vector_stores.google.genai_extension.build_generative_service", "llama_index.core.schema.TextNode", "llama_index.core.llms.mock.MockLLM", "llama_index.core.base.response.schema.Response", "llama_index.core.indices.query.schema.QueryBundle" ]
[((1057, 1084), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1074, 1084), False, 'import logging\n'), ((2707, 2740), 'llama_index.vector_stores.google.genai_extension.build_generative_service', 'genaix.build_generative_service', ([], {}), '()\n', (2738, 2740), True, 'import llama_index...
from unittest.mock import MagicMock, patch import pytest from llama_index.legacy.core.response.schema import Response from llama_index.legacy.schema import Document try: import google.ai.generativelanguage as genai has_google = True except ImportError: has_google = False from llama_index.legacy.indices....
[ "llama_index.legacy.vector_stores.google.generativeai.genai_extension.get_config", "llama_index.legacy.indices.managed.google.generativeai.GoogleIndex.create_corpus", "llama_index.legacy.indices.managed.google.generativeai.GoogleIndex.from_corpus", "llama_index.legacy.schema.Document", "llama_index.legacy.i...
[((693, 752), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (711, 752), False, 'import pytest\n'), ((754, 798), 'unittest.mock.patch', 'patch', (['"""google.auth.credentials.Credentials"""'], {}), "('google.auth.credent...
from unittest.mock import MagicMock, patch import pytest try: import google.ai.generativelanguage as genai has_google = True except ImportError: has_google = False from llama_index.legacy.response_synthesizers.google.generativeai import ( GoogleTextSynthesizer, set_google_config, ) from llama_in...
[ "llama_index.legacy.response_synthesizers.google.generativeai.GoogleTextSynthesizer.from_defaults", "llama_index.legacy.schema.TextNode", "llama_index.legacy.response_synthesizers.google.generativeai.set_google_config", "llama_index.legacy.vector_stores.google.generativeai.genai_extension.get_config" ]
[((663, 722), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (681, 722), False, 'import pytest\n'), ((724, 768), 'unittest.mock.patch', 'patch', (['"""google.auth.credentials.Credentials"""'], {}), "('google.auth.credent...
from typing import Any, Dict, List, Optional, Tuple from llama_index.core.base.base_query_engine import BaseQueryEngine from llama_index.core.base.response.schema import RESPONSE_TYPE from llama_index.core.callbacks.schema import CBEventType, EventPayload from llama_index.core.indices.composability.graph import Compos...
[ "llama_index.core.instrumentation.get_dispatcher", "llama_index.core.settings.callback_manager_from_settings_or_context", "llama_index.core.schema.NodeWithScore" ]
[((585, 620), 'llama_index.core.instrumentation.get_dispatcher', 'instrument.get_dispatcher', (['__name__'], {}), '(__name__)\n', (610, 620), True, 'import llama_index.core.instrumentation as instrument\n'), ((1649, 1734), 'llama_index.core.settings.callback_manager_from_settings_or_context', 'callback_manager_from_set...
from llama_index import ( SimpleDirectoryReader, VectorStoreIndex, ServiceContext, ) from llama_index.llms import LlamaCPP from llama_index.llms.llama_utils import messages_to_prompt, completion_to_prompt import llama_index.llms.llama_cpp from langchain.embeddings import HuggingFaceEmbeddings import co...
[ "llama_index.VectorStoreIndex.from_documents", "llama_index.ServiceContext.from_defaults", "llama_index.SimpleDirectoryReader" ]
[((431, 491), 'langchain.embeddings.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': 'config.EMBEDDING_MODEL_URL'}), '(model_name=config.EMBEDDING_MODEL_URL)\n', (452, 491), False, 'from langchain.embeddings import HuggingFaceEmbeddings\n'), ((538, 600), 'llama_index.ServiceContext.from_defaults', '...
import time import llama_index from atlassian import Bitbucket import os import sys sys.path.append('../') import local_secrets as secrets start_time = time.time() stash = Bitbucket('https://git.techstyle.net', token=secrets.stash_token) os.environ['OPENAI_API_KEY'] = secrets.techstyle_openai_key project ='DATASICENCE...
[ "llama_index.GPTSimpleVectorIndex", "llama_index.Document" ]
[((84, 106), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (99, 106), False, 'import sys\n'), ((153, 164), 'time.time', 'time.time', ([], {}), '()\n', (162, 164), False, 'import time\n'), ((173, 238), 'atlassian.Bitbucket', 'Bitbucket', (['"""https://git.techstyle.net"""'], {'token': 'secrets....
import json from typing import Dict, List import llama_index.query_engine from llama_index import ServiceContext, QueryBundle from llama_index.callbacks import CBEventType, LlamaDebugHandler, CallbackManager from llama_index.indices.base import BaseIndex from llama_index.indices.query.base import BaseQueryEngine from ...
[ "llama_index.ServiceContext.from_defaults", "llama_index.callbacks.LlamaDebugHandler", "llama_index.selectors.LLMSingleSelector.from_defaults", "llama_index.tools.QueryEngineTool.from_defaults", "llama_index.response.schema.Response", "llama_index.callbacks.CallbackManager" ]
[((1288, 1314), 'llama_index.response.schema.Response', 'Response', (['f"""我是{self.name}"""'], {}), "(f'我是{self.name}')\n", (1296, 1314), False, 'from llama_index.response.schema import RESPONSE_TYPE, Response\n'), ((2113, 2203), 'llama_index.tools.QueryEngineTool.from_defaults', 'QueryEngineTool.from_defaults', ([], {...
import qdrant_client from llama_index import ( VectorStoreIndex, ServiceContext, ) from llama_index.llms import Ollama from llama_index.vector_stores.qdrant import QdrantVectorStore import llama_index llama_index.set_global_handler("simple") # re-initialize the vector store client = qdrant_client.QdrantClient...
[ "llama_index.ServiceContext.from_defaults", "llama_index.set_global_handler", "llama_index.vector_stores.qdrant.QdrantVectorStore", "llama_index.VectorStoreIndex.from_vector_store", "llama_index.llms.Ollama" ]
[((210, 250), 'llama_index.set_global_handler', 'llama_index.set_global_handler', (['"""simple"""'], {}), "('simple')\n", (240, 250), False, 'import llama_index\n'), ((294, 342), 'qdrant_client.QdrantClient', 'qdrant_client.QdrantClient', ([], {'path': '"""./qdrant_data"""'}), "(path='./qdrant_data')\n", (320, 342), Fa...
import os import glob import llama_index from llama_index.core import ServiceContext from llama_index.llms.ollama import Ollama from llama_index.core import SimpleDirectoryReader from llama_index.core.response_synthesizers import TreeSummarize # MODEL = "mistral" MODEL = "llama2" # MODEL = "llama2:text" # Doesn't f...
[ "llama_index.core.SimpleDirectoryReader", "llama_index.llms.ollama.Ollama", "llama_index.core.response_synthesizers.TreeSummarize", "llama_index.core.ServiceContext.from_defaults" ]
[((525, 564), 'os.makedirs', 'os.makedirs', (['SUMMARY_DIR'], {'exist_ok': '(True)'}), '(SUMMARY_DIR, exist_ok=True)\n', (536, 564), False, 'import os\n'), ((1540, 1588), 'llama_index.llms.ollama.Ollama', 'Ollama', ([], {'model': 'MODEL', 'request_timeout': 'TIMEOUT_SEC'}), '(model=MODEL, request_timeout=TIMEOUT_SEC)\n...
from io import BytesIO from flask import Flask, jsonify import os # import tweepy from dotenv import load_dotenv from flask import request,jsonify import snscrape.modules.twitter as snstwitter from snscrape.modules.twitter import TwitterSearchScraper, TwitterSearchScraperMode import requests from goose3 import Goose fr...
[ "llama_index.GPTVectorStoreIndex.from_documents", "llama_index.TwitterTweetReader" ]
[((973, 988), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (978, 988), False, 'from flask import Flask, jsonify\n'), ((1057, 1070), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (1068, 1070), False, 'from dotenv import load_dotenv\n'), ((1078, 1106), 'os.getenv', 'os.getenv', (['"""HUGGINGFACE_A...
## main function of AWS Lambda function import llama_index from llama_index import download_loader import boto3 import json import urllib.parse from llama_index import SimpleDirectoryReader def main(event, context): # extracting s3 bucket and key information from SQS message print(event) s3_info = json.lo...
[ "llama_index.download_loader" ]
[((313, 352), 'json.loads', 'json.loads', (["event['Records'][0]['body']"], {}), "(event['Records'][0]['body'])\n", (323, 352), False, 'import json\n'), ((628, 692), 'llama_index.download_loader', 'download_loader', (['"""S3Reader"""'], {'custom_path': '"""/tmp/llamahub_modules"""'}), "('S3Reader', custom_path='/tmp/ll...
"""Download.""" import json import logging import os import subprocess import sys from enum import Enum from importlib import util from pathlib import Path from typing import Any, Dict, List, Optional, Union import pkg_resources import requests from pkg_resources import DistributionNotFound from llama_index.download....
[ "llama_index.download.utils.get_exports", "llama_index.download.utils.initialize_directory" ]
[((637, 664), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (654, 664), False, 'import logging\n'), ((5550, 5583), 'os.path.exists', 'os.path.exists', (['requirements_path'], {}), '(requirements_path)\n', (5564, 5583), False, 'import os\n'), ((7403, 7471), 'llama_index.download.utils.ini...
import json from typing import Dict, List import llama_index.query_engine from llama_index import ServiceContext, QueryBundle from llama_index.callbacks import CBEventType, LlamaDebugHandler, CallbackManager from llama_index.indices.base import BaseIndex from llama_index.indices.query.base import BaseQueryEngine from ...
[ "llama_index.ServiceContext.from_defaults", "llama_index.callbacks.CallbackManager", "llama_index.callbacks.LlamaDebugHandler", "llama_index.response.schema.Response" ]
[((1298, 1324), 'llama_index.response.schema.Response', 'Response', (['f"""我是{self.name}"""'], {}), "(f'我是{self.name}')\n", (1306, 1324), False, 'from llama_index.response.schema import RESPONSE_TYPE, Response\n'), ((2530, 2571), 'common.llm.create_llm', 'create_llm', (['cb_manager', 'LLM_CACHE_ENABLED'], {}), '(cb_man...
import asyncio import math import numpy as np import random import tqdm from functools import reduce from typing import Any, List, Dict, Sequence, Union, Coroutine, Iterable from llama_index.core.async_utils import asyncio_module from llama_index.core.bridge.pydantic import BaseModel, Field from llama_index.core.llms ...
[ "llama_index.packs.diff_private_simple_dataset.events.SyntheticExampleEndEvent", "llama_index.core.instrumentation.get_dispatcher", "llama_index.core.bridge.pydantic.Field", "llama_index.packs.diff_private_simple_dataset.templates.zero_shot_completion_template.format", "llama_index.packs.diff_private_simple...
[((1280, 1315), 'llama_index.core.instrumentation.get_dispatcher', 'instrument.get_dispatcher', (['__name__'], {}), '(__name__)\n', (1305, 1315), True, 'import llama_index.core.instrumentation as instrument\n'), ((1589, 1654), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'description': '"""Instruction assoc...
"""Download.""" import json import logging import os import subprocess import sys from enum import Enum from importlib import util from pathlib import Path from typing import Any, Dict, List, Optional, Union import pkg_resources import requests from pkg_resources import DistributionNotFound from llama_index.download....
[ "llama_index.download.utils.get_exports", "llama_index.download.utils.initialize_directory" ]
[((637, 664), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (654, 664), False, 'import logging\n'), ((5550, 5583), 'os.path.exists', 'os.path.exists', (['requirements_path'], {}), '(requirements_path)\n', (5564, 5583), False, 'import os\n'), ((7403, 7471), 'llama_index.download.utils.ini...