id stringlengths 14 16 | text stringlengths 45 2.05k | source stringlengths 53 111 |
|---|---|---|
1092c5dca18c-1 | Other methods are available for convenience.
:class:`SearxResults` is a convenience wrapper around the raw json result.
Example usage of the ``run`` method to make a search:
.. code-block:: python
s.run(query="what is the best search engine?")
Engine Parameters
-----------------
You can pass any `accept... | https://langchain.readthedocs.io/en/latest/_modules/langchain/utilities/searx_search.html |
1092c5dca18c-2 | .. code-block:: python
# select the github engine and pass the search suffix
s = SearchWrapper("langchain library", query_suffix="!gh")
s = SearchWrapper("langchain library")
# select github using the conventional google search syntax
s.run("large language models", query_suffix="site:g... | https://langchain.readthedocs.io/en/latest/_modules/langchain/utilities/searx_search.html |
1092c5dca18c-3 | return {"language": "en", "format": "json"}
[docs]class SearxResults(dict):
"""Dict like wrapper around search api results."""
_data = ""
def __init__(self, data: str):
"""Take a raw result from Searx and make it into a dict like object."""
json_data = json.loads(data)
super().__init... | https://langchain.readthedocs.io/en/latest/_modules/langchain/utilities/searx_search.html |
1092c5dca18c-4 | .. code-block:: python
from langchain.utilities import SearxSearchWrapper
# note the unsecure parameter is not needed if you pass the url scheme as
# http
searx = SearxSearchWrapper(searx_host="http://localhost:8888",
un... | https://langchain.readthedocs.io/en/latest/_modules/langchain/utilities/searx_search.html |
1092c5dca18c-5 | print(
f"Warning: missing the url scheme on host \
! assuming secure https://{searx_host} "
)
searx_host = "https://" + searx_host
elif searx_host.startswith("http://"):
values["unsecure"] = True
cls.disable_ssl_warnings(True)
... | https://langchain.readthedocs.io/en/latest/_modules/langchain/utilities/searx_search.html |
1092c5dca18c-6 | Example:
This will make a query to the qwant engine:
.. code-block:: python
from langchain.utilities import SearxSearchWrapper
searx = SearxSearchWrapper(searx_host="http://my.searx.host")
searx.run("what is the weather in France ?", engine="qwant"... | https://langchain.readthedocs.io/en/latest/_modules/langchain/utilities/searx_search.html |
1092c5dca18c-7 | num_results: int,
engines: Optional[List[str]] = None,
query_suffix: Optional[str] = "",
**kwargs: Any,
) -> List[Dict]:
"""Run query through Searx API and returns the results with metadata.
Args:
query: The query to search for.
query_suffix: Extra suf... | https://langchain.readthedocs.io/en/latest/_modules/langchain/utilities/searx_search.html |
1092c5dca18c-8 | "snippet": result.get("content", ""),
"title": result["title"],
"link": result["url"],
"engines": result["engines"],
"category": result["category"],
}
for result in results
]
By Harrison Chase
© Copyright 2023, Ha... | https://langchain.readthedocs.io/en/latest/_modules/langchain/utilities/searx_search.html |
fcbf1eddffa0-0 | Source code for langchain.utilities.serpapi
"""Chain that calls SerpAPI.
Heavily borrowed from https://github.com/ofirpress/self-ask
"""
import os
import sys
from typing import Any, Dict, Optional, Tuple
import aiohttp
from pydantic import BaseModel, Extra, Field, root_validator
from langchain.utils import get_from_dic... | https://langchain.readthedocs.io/en/latest/_modules/langchain/utilities/serpapi.html |
fcbf1eddffa0-1 | aiosession: Optional[aiohttp.ClientSession] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python packag... | https://langchain.readthedocs.io/en/latest/_modules/langchain/utilities/serpapi.html |
fcbf1eddffa0-2 | else:
async with self.aiosession.get(url, params=params) as response:
res = await response.json()
return self._process_response(res)
[docs] def run(self, query: str) -> str:
"""Run query through SerpAPI and parse result."""
return self._process_response(self.result... | https://langchain.readthedocs.io/en/latest/_modules/langchain/utilities/serpapi.html |
fcbf1eddffa0-3 | ):
toret = res["answer_box"]["snippet_highlighted_words"][0]
elif (
"sports_results" in res.keys()
and "game_spotlight" in res["sports_results"].keys()
):
toret = res["sports_results"]["game_spotlight"]
elif (
"knowledge_graph" in res.k... | https://langchain.readthedocs.io/en/latest/_modules/langchain/utilities/serpapi.html |
bcf4cfbb1d87-0 | Source code for langchain.vectorstores.opensearch_vector_search
"""Wrapper around OpenSearch vector database."""
from __future__ import annotations
import uuid
from typing import Any, Dict, Iterable, List, Optional
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from la... | https://langchain.readthedocs.io/en/latest/_modules/langchain/vectorstores/opensearch_vector_search.html |
bcf4cfbb1d87-1 | f"Got error: {e} "
)
return client
def _validate_embeddings_and_bulk_size(embeddings_length: int, bulk_size: int) -> None:
"""Validate Embeddings Length and Bulk Size."""
if embeddings_length == 0:
raise RuntimeError("Embeddings size is zero")
if bulk_size < embeddings_length:
ra... | https://langchain.readthedocs.io/en/latest/_modules/langchain/vectorstores/opensearch_vector_search.html |
bcf4cfbb1d87-2 | return {
"mappings": {
"properties": {
"vector_field": {"type": "knn_vector", "dimension": dim},
}
}
}
def _default_text_mapping(
dim: int,
engine: str = "nmslib",
space_type: str = "l2",
ef_search: int = 512,
ef_construction: int = 512,
... | https://langchain.readthedocs.io/en/latest/_modules/langchain/vectorstores/opensearch_vector_search.html |
bcf4cfbb1d87-3 | pre_filter: Dict = MATCH_ALL_QUERY,
) -> Dict:
"""For Script Scoring Search, this is the default query."""
return {
"query": {
"script_score": {
"query": pre_filter,
"script": {
"source": "knn_score",
"lang": "knn",
... | https://langchain.readthedocs.io/en/latest/_modules/langchain/vectorstores/opensearch_vector_search.html |
bcf4cfbb1d87-4 | },
},
}
}
}
def _get_kwargs_value(kwargs: Any, key: str, default_value: Any) -> Any:
"""Get the value of the key if present. Else get the default_value."""
if key in kwargs:
return kwargs.get(key)
return default_value
[docs]class OpenSearchVectorSearch(VectorS... | https://langchain.readthedocs.io/en/latest/_modules/langchain/vectorstores/opensearch_vector_search.html |
bcf4cfbb1d87-5 | self.embedding_function.embed_documents(list(text))[0] for text in texts
]
_validate_embeddings_and_bulk_size(len(embeddings), bulk_size)
return _bulk_ingest_embeddings(
self.client, self.index_name, embeddings, texts, metadatas
)
[docs] def similarity_search(
self... | https://langchain.readthedocs.io/en/latest/_modules/langchain/vectorstores/opensearch_vector_search.html |
bcf4cfbb1d87-6 | nearest neighbors; default: {"match_all": {}}
"""
embedding = self.embedding_function.embed_query(query)
search_type = _get_kwargs_value(kwargs, "search_type", "approximate_search")
if search_type == "approximate_search":
size = _get_kwargs_value(kwargs, "size", 4)
... | https://langchain.readthedocs.io/en/latest/_modules/langchain/vectorstores/opensearch_vector_search.html |
bcf4cfbb1d87-7 | **kwargs: Any,
) -> OpenSearchVectorSearch:
"""Construct OpenSearchVectorSearch wrapper from raw documents.
Example:
.. code-block:: python
from langchain import OpenSearchVectorSearch
from langchain.embeddings import OpenAIEmbeddings
embed... | https://langchain.readthedocs.io/en/latest/_modules/langchain/vectorstores/opensearch_vector_search.html |
bcf4cfbb1d87-8 | kwargs, "opensearch_url", "OPENSEARCH_URL"
)
client = _get_opensearch_client(opensearch_url)
embeddings = embedding.embed_documents(texts)
_validate_embeddings_and_bulk_size(len(embeddings), bulk_size)
dim = len(embeddings[0])
index_name = uuid.uuid4().hex
is_appx... | https://langchain.readthedocs.io/en/latest/_modules/langchain/vectorstores/opensearch_vector_search.html |
7be178cde929-0 | Source code for langchain.vectorstores.faiss
"""Wrapper around FAISS vector database."""
from __future__ import annotations
import pickle
import uuid
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple
import numpy as np
from langchain.docstore.base import AddableMixin, Docs... | https://langchain.readthedocs.io/en/latest/_modules/langchain/vectorstores/faiss.html |
7be178cde929-1 | self.index_to_docstore_id = index_to_docstore_id
[docs] def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: I... | https://langchain.readthedocs.io/en/latest/_modules/langchain/vectorstores/faiss.html |
7be178cde929-2 | self.docstore.add({_id: doc for _, _id, doc in full_info})
index_to_id = {index: _id for index, _id, _ in full_info}
self.index_to_docstore_id.update(index_to_id)
return [_id for _, _id, _ in full_info]
[docs] def similarity_search_with_score_by_vector(
self, embedding: List[float], k... | https://langchain.readthedocs.io/en/latest/_modules/langchain/vectorstores/faiss.html |
7be178cde929-3 | Returns:
List of Documents most similar to the query and score for each
"""
embedding = self.embedding_function(query)
docs = self.similarity_search_with_score_by_vector(embedding, k)
return docs
[docs] def similarity_search_by_vector(
self, embedding: List[float],... | https://langchain.readthedocs.io/en/latest/_modules/langchain/vectorstores/faiss.html |
7be178cde929-4 | among selected documents.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
Returns:
List of Documents selected by maximal marginal r... | https://langchain.readthedocs.io/en/latest/_modules/langchain/vectorstores/faiss.html |
7be178cde929-5 | k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
Returns:
List of Documents selected by maximal marginal relevance.
"""
embedding = self.embedding_function(query)
docs = self.max_marginal_relevance_se... | https://langchain.readthedocs.io/en/latest/_modules/langchain/vectorstores/faiss.html |
7be178cde929-6 | [docs] @classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> FAISS:
"""Construct FAISS wrapper from raw documents.
This is a user friendly interface that:
1. Em... | https://langchain.readthedocs.io/en/latest/_modules/langchain/vectorstores/faiss.html |
7be178cde929-7 | Args:
folder_path: folder path to save index, docstore,
and index_to_docstore_id to.
"""
path = Path(folder_path)
path.mkdir(exist_ok=True, parents=True)
# save index separately since it is not picklable
faiss = dependable_faiss_import()
faiss.... | https://langchain.readthedocs.io/en/latest/_modules/langchain/vectorstores/faiss.html |
823fc207d553-0 | Source code for langchain.vectorstores.atlas
"""Wrapper around Atlas by Nomic."""
from __future__ import annotations
import logging
import uuid
from typing import Any, Iterable, List, Optional
import numpy as np
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langc... | https://langchain.readthedocs.io/en/latest/_modules/langchain/vectorstores/atlas.html |
823fc207d553-1 | is_public (bool): Whether your project is publicly accessible.
True by default.
reset_project_if_exists (bool): Whether to reset this project if it
already exists. Default False.
Generally useful during development and testing.
"""
try:
... | https://langchain.readthedocs.io/en/latest/_modules/langchain/vectorstores/atlas.html |
823fc207d553-2 | metadatas (Optional[List[dict]], optional): Optional list of metadatas.
ids (Optional[List[str]]): An optional list of ids.
refresh(bool): Whether or not to refresh indices with the updated data.
Default True.
Returns:
List[str]: List of IDs of the added texts... | https://langchain.readthedocs.io/en/latest/_modules/langchain/vectorstores/atlas.html |
823fc207d553-3 | else:
if metadatas is None:
data = [
{"text": text, AtlasDB._ATLAS_DEFAULT_ID_FIELD: ids[i]}
for i, text in enumerate(texts)
]
else:
for i, text in enumerate(texts):
metadatas[i]["text"] =... | https://langchain.readthedocs.io/en/latest/_modules/langchain/vectorstores/atlas.html |
823fc207d553-4 | """
if self._embedding_function is None:
raise NotImplementedError(
"AtlasDB requires an embedding_function for text similarity search!"
)
_embedding = self._embedding_function.embed_documents([query])[0]
embedding = np.array(_embedding).reshape(1, -1)
... | https://langchain.readthedocs.io/en/latest/_modules/langchain/vectorstores/atlas.html |
823fc207d553-5 | ids (Optional[List[str]]): Optional list of document IDs. If None,
ids will be auto created
description (str): A description for your project.
is_public (bool): Whether your project is publicly accessible.
True by default.
reset_project_if_exists (bool... | https://langchain.readthedocs.io/en/latest/_modules/langchain/vectorstores/atlas.html |
823fc207d553-6 | ids: Optional[List[str]] = None,
name: Optional[str] = None,
api_key: Optional[str] = None,
persist_directory: Optional[str] = None,
description: str = "A description for your project",
is_public: bool = True,
reset_project_if_exists: bool = False,
index_kwargs: O... | https://langchain.readthedocs.io/en/latest/_modules/langchain/vectorstores/atlas.html |
823fc207d553-7 | return cls.from_texts(
name=name,
api_key=api_key,
texts=texts,
embedding=embedding,
metadatas=metadatas,
ids=ids,
description=description,
is_public=is_public,
reset_project_if_exists=reset_project_if_exists,
... | https://langchain.readthedocs.io/en/latest/_modules/langchain/vectorstores/atlas.html |
c29a7d53a249-0 | Source code for langchain.vectorstores.base
"""Interface for vector stores."""
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import Any, Iterable, List, Optional
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
[docs]class VectorStore... | https://langchain.readthedocs.io/en/latest/_modules/langchain/vectorstores/base.html |
c29a7d53a249-1 | ) -> List[Document]:
"""Return docs most similar to query."""
[docs] def similarity_search_by_vector(
self, embedding: List[float], k: int = 4, **kwargs: Any
) -> List[Document]:
"""Return docs most similar to embedding vector.
Args:
embedding: Embedding to look up doc... | https://langchain.readthedocs.io/en/latest/_modules/langchain/vectorstores/base.html |
c29a7d53a249-2 | fetch_k: Number of Documents to fetch to pass to MMR algorithm.
Returns:
List of Documents selected by maximal marginal relevance.
"""
raise NotImplementedError
[docs] @classmethod
def from_documents(
cls,
documents: List[Document],
embedding: Embedding... | https://langchain.readthedocs.io/en/latest/_modules/langchain/vectorstores/base.html |
0aa22f545fd0-0 | Source code for langchain.vectorstores.qdrant
"""Wrapper around Qdrant vector database."""
import uuid
from operator import itemgetter
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union, cast
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
fr... | https://langchain.readthedocs.io/en/latest/_modules/langchain/vectorstores/qdrant.html |
0aa22f545fd0-1 | f"got {type(client)}"
)
self.client: qdrant_client.QdrantClient = client
self.collection_name = collection_name
self.embedding_function = embedding_function
self.content_payload_key = content_payload_key or self.CONTENT_KEY
self.metadata_payload_key = metadata_payload... | https://langchain.readthedocs.io/en/latest/_modules/langchain/vectorstores/qdrant.html |
0aa22f545fd0-2 | Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Filter by metadata. Defaults to None.
Returns:
List of Documents most similar to the query.
"""
results = self.similarity_search_with_score(... | https://langchain.readthedocs.io/en/latest/_modules/langchain/vectorstores/qdrant.html |
0aa22f545fd0-3 | among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
Returns:
List of Documents selected by maximal marginal relevance.... | https://langchain.readthedocs.io/en/latest/_modules/langchain/vectorstores/qdrant.html |
0aa22f545fd0-4 | ) -> "Qdrant":
return cast(
Qdrant,
super().from_documents(
documents,
embedding,
url=url,
port=port,
grpc_port=grpc_port,
prefer_grpc=prefer_grpc,
https=https,
... | https://langchain.readthedocs.io/en/latest/_modules/langchain/vectorstores/qdrant.html |
0aa22f545fd0-5 | metadatas:
An optional list of metadata. If provided it has to be of the same
length as a list of texts.
url: either host or str of "Optional[scheme], host, Optional[port],
Optional[prefix]". Default: `None`
port: Port of the REST API interface. De... | https://langchain.readthedocs.io/en/latest/_modules/langchain/vectorstores/qdrant.html |
0aa22f545fd0-6 | **kwargs:
Additional arguments passed directly into REST client initialization
This is a user friendly interface that:
1. Embeds documents.
2. Creates an in memory docstore
3. Initializes the Qdrant database
This is intended to be a quick way to get st... | https://langchain.readthedocs.io/en/latest/_modules/langchain/vectorstores/qdrant.html |
0aa22f545fd0-7 | ),
)
# Now generate the embeddings for all the texts
embeddings = embedding.embed_documents(texts)
client.upsert(
collection_name=collection_name,
points=rest.Batch(
ids=[uuid.uuid4().hex for _ in texts],
vectors=embeddings,
... | https://langchain.readthedocs.io/en/latest/_modules/langchain/vectorstores/qdrant.html |
0aa22f545fd0-8 | return Document(
page_content=scored_point.payload.get(content_payload_key),
metadata=scored_point.payload.get(metadata_payload_key) or {},
)
def _qdrant_filter_from_dict(self, filter: Optional[MetadataFilter]) -> Any:
if filter is None or 0 == len(filter):
return... | https://langchain.readthedocs.io/en/latest/_modules/langchain/vectorstores/qdrant.html |
62d6c6370dea-0 | Source code for langchain.vectorstores.milvus
"""Wrapper around the Milvus vector database."""
from __future__ import annotations
import uuid
from typing import Any, Iterable, List, Optional, Tuple
import numpy as np
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from ... | https://langchain.readthedocs.io/en/latest/_modules/langchain/vectorstores/milvus.html |
62d6c6370dea-1 | if not connections.has_connection("default"):
connections.connect(**connection_args)
self.embedding_func = embedding_function
self.collection_name = collection_name
self.text_field = text_field
self.auto_id = False
self.primary_field = None
self.vector_field =... | https://langchain.readthedocs.io/en/latest/_modules/langchain/vectorstores/milvus.html |
62d6c6370dea-2 | texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
partition_name: Optional[str] = None,
timeout: Optional[int] = None,
**kwargs: Any,
) -> List[str]:
"""Insert text data into Milvus.
When using add_texts() it is assumed that a collection has already
... | https://langchain.readthedocs.io/en/latest/_modules/langchain/vectorstores/milvus.html |
62d6c6370dea-3 | # Insert into the collection.
res = self.col.insert(
insert_list, partition_name=partition_name, timeout=timeout
)
# Flush to make sure newly inserted is immediately searchable.
self.col.flush()
return res.primary_keys
def _worker_search(
self,
que... | https://langchain.readthedocs.io/en/latest/_modules/langchain/vectorstores/milvus.html |
62d6c6370dea-4 | ret.append(
(
Document(page_content=meta.pop(self.text_field), metadata=meta),
result.distance,
result.id,
)
)
return data[0], ret
[docs] def similarity_search_with_score(
self,
query: str,... | https://langchain.readthedocs.io/en/latest/_modules/langchain/vectorstores/milvus.html |
62d6c6370dea-5 | )
return [(x, y) for x, y, _ in result]
[docs] def max_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
param: Optional[dict] = None,
expr: Optional[str] = None,
partition_names: Optional[List[str]] = None,
round_decim... | https://langchain.readthedocs.io/en/latest/_modules/langchain/vectorstores/milvus.html |
62d6c6370dea-6 | # Extract result IDs.
ids = [x for _, _, x in res]
# Get the raw vectors from Milvus.
vectors = self.col.query(
expr=f"{self.primary_field} in {ids}",
output_fields=[self.primary_field, self.vector_field],
)
# Reorganize the results from query to match res... | https://langchain.readthedocs.io/en/latest/_modules/langchain/vectorstores/milvus.html |
62d6c6370dea-7 | Defaults to None.
expr (str, optional): Filtering expression. Defaults to None.
partition_names (List[str], optional): What partitions to search.
Defaults to None.
round_decimal (int, optional): What decimal point to round to.
Defaults to -1.
... | https://langchain.readthedocs.io/en/latest/_modules/langchain/vectorstores/milvus.html |
62d6c6370dea-8 | "Please install it with `pip install pymilvus`."
)
# Connect to Milvus instance
if not connections.has_connection("default"):
connections.connect(**kwargs.get("connection_args", {"port": 19530}))
# Determine embedding dim
embeddings = embedding.embed_query(texts[0... | https://langchain.readthedocs.io/en/latest/_modules/langchain/vectorstores/milvus.html |
62d6c6370dea-9 | )
else:
fields.append(FieldSchema(key, dtype))
# Find out max length of texts
max_length = 0
for y in texts:
max_length = max(max_length, len(y))
# Create the text field
fields.append(
FieldSchema(text_field, DataType.VA... | https://langchain.readthedocs.io/en/latest/_modules/langchain/vectorstores/milvus.html |
14792f71fc7f-0 | Source code for langchain.vectorstores.chroma
"""Wrapper around ChromaDB embeddings platform."""
from __future__ import annotations
import logging
import uuid
from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Tuple
from langchain.docstore.document import Document
from langchain.embeddings.base impo... | https://langchain.readthedocs.io/en/latest/_modules/langchain/vectorstores/chroma.html |
14792f71fc7f-1 | self,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
embedding_function: Optional[Embeddings] = None,
persist_directory: Optional[str] = None,
client_settings: Optional[chromadb.config.Settings] = None,
) -> None:
"""Initialize with Chroma client."""
try:
... | https://langchain.readthedocs.io/en/latest/_modules/langchain/vectorstores/chroma.html |
14792f71fc7f-2 | metadatas (Optional[List[dict]], optional): Optional list of metadatas.
ids (Optional[List[str]], optional): Optional list of IDs.
Returns:
List[str]: List of IDs of the added texts.
"""
# TODO: Handle the case where the user doesn't provide ids on the Collection
... | https://langchain.readthedocs.io/en/latest/_modules/langchain/vectorstores/chroma.html |
14792f71fc7f-3 | """Return docs most similar to embedding vector.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query vector.
"""
results = self._collection.qu... | https://langchain.readthedocs.io/en/latest/_modules/langchain/vectorstores/chroma.html |
14792f71fc7f-4 | [docs] def persist(self) -> None:
"""Persist the collection.
This can be used to explicitly persist the data to disk.
It will also be called automatically when the object is destroyed.
"""
if self._persist_directory is None:
raise ValueError(
"You m... | https://langchain.readthedocs.io/en/latest/_modules/langchain/vectorstores/chroma.html |
14792f71fc7f-5 | Chroma: Chroma vectorstore.
"""
chroma_collection = cls(
collection_name=collection_name,
embedding_function=embedding,
persist_directory=persist_directory,
client_settings=client_settings,
)
chroma_collection.add_texts(texts=texts, metadat... | https://langchain.readthedocs.io/en/latest/_modules/langchain/vectorstores/chroma.html |
14792f71fc7f-6 | return cls.from_texts(
texts=texts,
embedding=embedding,
metadatas=metadatas,
ids=ids,
collection_name=collection_name,
persist_directory=persist_directory,
client_settings=client_settings,
)
By Harrison Chase
© Copyr... | https://langchain.readthedocs.io/en/latest/_modules/langchain/vectorstores/chroma.html |
3873beacaf74-0 | Source code for langchain.vectorstores.deeplake
"""Wrapper around Activeloop Deep Lake."""
from __future__ import annotations
import logging
import uuid
from typing import Any, Iterable, List, Optional, Sequence
import numpy as np
from langchain.docstore.document import Document
from langchain.embeddings.base import Em... | https://langchain.readthedocs.io/en/latest/_modules/langchain/vectorstores/deeplake.html |
3873beacaf74-1 | vectorstore = DeepLake("langchain_store", embeddings.embed_query)
"""
_LANGCHAIN_DEFAULT_DEEPLAKE_PATH = "mem://langchain"
def __init__(
self,
dataset_path: str = _LANGCHAIN_DEFAULT_DEEPLAKE_PATH,
token: Optional[str] = None,
embedding_function: Optional[Embeddings] = None,
... | https://langchain.readthedocs.io/en/latest/_modules/langchain/vectorstores/deeplake.html |
3873beacaf74-2 | **kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts (Iterable[str]): Texts to add to the vectorstore.
metadatas (Optional[List[dict]], optional): Optional list of metadatas.
ids (Optional[List[str]], opti... | https://langchain.readthedocs.io/en/latest/_modules/langchain/vectorstores/deeplake.html |
3873beacaf74-3 | ) -> List[Document]:
"""Return docs most similar to query."""
if self._embedding_function is None:
self.ds.summary()
ds_view = self.ds.filter(lambda x: query in x["text"].data()["value"])
else:
query_emb = np.array(self._embedding_function.embed_query(query))
... | https://langchain.readthedocs.io/en/latest/_modules/langchain/vectorstores/deeplake.html |
3873beacaf74-4 | Credentials are required in either the environment or
passed to the creds argument.
- a local file system path of the form ``./path/to/dataset`` or
``~/path/to/dataset`` or ``path/to/dataset``.
- a memory path of the form ``mem://path/to/dataset`` ... | https://langchain.readthedocs.io/en/latest/_modules/langchain/vectorstores/deeplake.html |
f52494be48b0-0 | Source code for langchain.vectorstores.weaviate
"""Wrapper around weaviate vector database."""
from __future__ import annotations
from typing import Any, Dict, Iterable, List, Optional
from uuid import uuid4
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain... | https://langchain.readthedocs.io/en/latest/_modules/langchain/vectorstores/weaviate.html |
f52494be48b0-1 | [docs] def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> List[str]:
"""Upload texts with metadata (properties) to Weaviate."""
from weaviate.util import get_valid_uuid
with self._client.batch as batch:
... | https://langchain.readthedocs.io/en/latest/_modules/langchain/vectorstores/weaviate.html |
f52494be48b0-2 | cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> VectorStore:
"""Not implemented for Weaviate yet."""
raise NotImplementedError("weaviate does not currently support `from_texts`.")
By Harrison Chase
©... | https://langchain.readthedocs.io/en/latest/_modules/langchain/vectorstores/weaviate.html |
c371a58d3b78-0 | Source code for langchain.vectorstores.elastic_vector_search
"""Wrapper around Elasticsearch vector database."""
from __future__ import annotations
import uuid
from typing import Any, Dict, Iterable, List, Optional
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from la... | https://langchain.readthedocs.io/en/latest/_modules/langchain/vectorstores/elastic_vector_search.html |
c371a58d3b78-1 | )
self.embedding = embedding
self.index_name = index_name
try:
es_client = elasticsearch.Elasticsearch(elasticsearch_url) # noqa
except ValueError as e:
raise ValueError(
f"Your elasticsearch client string is misformatted. Got error: {e} "
... | https://langchain.readthedocs.io/en/latest/_modules/langchain/vectorstores/elastic_vector_search.html |
c371a58d3b78-2 | bulk(self.client, requests)
# TODO: add option not to refresh
self.client.indices.refresh(index=self.index_name)
return ids
[docs] def similarity_search(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Document]:
"""Return docs most similar to query.
Args:
... | https://langchain.readthedocs.io/en/latest/_modules/langchain/vectorstores/elastic_vector_search.html |
c371a58d3b78-3 | from langchain.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
elastic_vector_search = ElasticVectorSearch.from_texts(
texts,
embeddings,
elasticsearch_url="http://localhost:9200"
)
"""... | https://langchain.readthedocs.io/en/latest/_modules/langchain/vectorstores/elastic_vector_search.html |
c371a58d3b78-4 | return cls(elasticsearch_url, index_name, embedding)
By Harrison Chase
© Copyright 2023, Harrison Chase.
Last updated on Mar 22, 2023. | https://langchain.readthedocs.io/en/latest/_modules/langchain/vectorstores/elastic_vector_search.html |
fe65ef494ce0-0 | Source code for langchain.vectorstores.pinecone
"""Wrapper around Pinecone vector database."""
from __future__ import annotations
import uuid
from typing import Any, Callable, Iterable, List, Optional, Tuple
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain... | https://langchain.readthedocs.io/en/latest/_modules/langchain/vectorstores/pinecone.html |
fe65ef494ce0-1 | self._namespace = namespace
[docs] def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
namespace: Optional[str] = None,
batch_size: int = 32,
**kwargs: Any,
) -> List[str]:
"""Run more ... | https://langchain.readthedocs.io/en/latest/_modules/langchain/vectorstores/pinecone.html |
fe65ef494ce0-2 | """Return pinecone documents most similar to query, along with scores.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Dictionary of argument(s) to filter on metadata
namespace: Namespace to search in. De... | https://langchain.readthedocs.io/en/latest/_modules/langchain/vectorstores/pinecone.html |
fe65ef494ce0-3 | namespace = self._namespace
query_obj = self._embedding_function(query)
docs = []
results = self._index.query(
[query_obj],
top_k=k,
include_metadata=True,
namespace=namespace,
filter=filter,
)
for res in results["matche... | https://langchain.readthedocs.io/en/latest/_modules/langchain/vectorstores/pinecone.html |
fe65ef494ce0-4 | "Please install it with `pip install pinecone-client`."
)
_index_name = index_name or str(uuid.uuid4())
indexes = pinecone.list_indexes() # checks if provided index exists
if _index_name in indexes:
index = pinecone.Index(_index_name)
else:
index = No... | https://langchain.readthedocs.io/en/latest/_modules/langchain/vectorstores/pinecone.html |
fe65ef494ce0-5 | cls,
index_name: str,
embedding: Embeddings,
text_key: str = "text",
namespace: Optional[str] = None,
) -> Pinecone:
"""Load pinecone vectorstore from index name."""
try:
import pinecone
except ImportError:
raise ValueError(
... | https://langchain.readthedocs.io/en/latest/_modules/langchain/vectorstores/pinecone.html |
570a1515e3e0-0 | Source code for langchain.docstore.wikipedia
"""Wrapper around wikipedia API."""
from typing import Union
from langchain.docstore.base import Docstore
from langchain.docstore.document import Document
[docs]class Wikipedia(Docstore):
"""Wrapper around wikipedia API."""
def __init__(self) -> None:
"""Chec... | https://langchain.readthedocs.io/en/latest/_modules/langchain/docstore/wikipedia.html |
3ef557a80ca1-0 | Source code for langchain.docstore.in_memory
"""Simple in memory docstore in the form of a dict."""
from typing import Dict, Union
from langchain.docstore.base import AddableMixin, Docstore
from langchain.docstore.document import Document
[docs]class InMemoryDocstore(Docstore, AddableMixin):
"""Simple in memory doc... | https://langchain.readthedocs.io/en/latest/_modules/langchain/docstore/in_memory.html |
048c5fea7e6f-0 | Source code for langchain.prompts.base
"""BasePrompt schema definition."""
from __future__ import annotations
import json
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Any, Callable, Dict, List, Mapping, Optional, Union
import yaml
from pydantic import BaseModel, Extra, Field, root_val... | https://langchain.readthedocs.io/en/latest/_modules/langchain/prompts/base.html |
048c5fea7e6f-1 | except KeyError as e:
raise ValueError(
"Invalid prompt schema; check for mismatched or missing input parameters. "
+ str(e)
)
class StringPromptValue(PromptValue):
text: str
def to_string(self) -> str:
"""Return prompt as string."""
return self.text
d... | https://langchain.readthedocs.io/en/latest/_modules/langchain/prompts/base.html |
048c5fea7e6f-2 | "internally, please rename."
)
overall = set(values["input_variables"]).intersection(
values["partial_variables"]
)
if overall:
raise ValueError(
f"Found overlapping input and partial variables: {overall}"
)
return values
[d... | https://langchain.readthedocs.io/en/latest/_modules/langchain/prompts/base.html |
048c5fea7e6f-3 | prompt_dict["_type"] = self._prompt_type
return prompt_dict
[docs] def save(self, file_path: Union[Path, str]) -> None:
"""Save the prompt.
Args:
file_path: Path to directory to save prompt to.
Example:
.. code-block:: python
prompt.save(file_path="path... | https://langchain.readthedocs.io/en/latest/_modules/langchain/prompts/base.html |
f936bb0096e3-0 | Source code for langchain.prompts.few_shot
"""Prompt template that contains few shot examples."""
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Extra, root_validator
from langchain.prompts.base import (
DEFAULT_FORMATTER_MAPPING,
StringPromptTemplate,
check_valid_template,
)
f... | https://langchain.readthedocs.io/en/latest/_modules/langchain/prompts/few_shot.html |
f936bb0096e3-1 | """Check that one and only one of examples/example_selector are provided."""
examples = values.get("examples", None)
example_selector = values.get("example_selector", None)
if examples and example_selector:
raise ValueError(
"Only one of 'examples' and 'example_select... | https://langchain.readthedocs.io/en/latest/_modules/langchain/prompts/few_shot.html |
f936bb0096e3-2 | # Get the examples to use.
examples = self._get_examples(**kwargs)
# Format the examples.
example_strings = [
self.example_prompt.format(**example) for example in examples
]
# Create the overall template.
pieces = [self.prefix, *example_strings, self.suffix]
... | https://langchain.readthedocs.io/en/latest/_modules/langchain/prompts/few_shot.html |
fc7dd9961644-0 | Source code for langchain.prompts.prompt
"""Prompt schema definition."""
from __future__ import annotations
from pathlib import Path
from string import Formatter
from typing import Any, Dict, List, Union
from pydantic import BaseModel, Extra, root_validator
from langchain.prompts.base import (
DEFAULT_FORMATTER_MAP... | https://langchain.readthedocs.io/en/latest/_modules/langchain/prompts/prompt.html |
fc7dd9961644-1 | """
kwargs = self._merge_partial_and_user_variables(**kwargs)
return DEFAULT_FORMATTER_MAPPING[self.template_format](self.template, **kwargs)
@root_validator()
def template_is_valid(cls, values: Dict) -> Dict:
"""Check that template and input variables are consistent."""
if value... | https://langchain.readthedocs.io/en/latest/_modules/langchain/prompts/prompt.html |
fc7dd9961644-2 | ) -> PromptTemplate:
"""Load a prompt from a file.
Args:
template_file: The path to the file containing the prompt template.
input_variables: A list of variable names the final prompt template
will expect.
Returns:
The prompt loaded from the fi... | https://langchain.readthedocs.io/en/latest/_modules/langchain/prompts/prompt.html |
52aa80b12a88-0 | Source code for langchain.prompts.loading
"""Load prompts from disk."""
import importlib
import json
import logging
from pathlib import Path
from typing import Union
import yaml
from langchain.output_parsers.regex import RegexParser
from langchain.prompts.base import BasePromptTemplate
from langchain.prompts.few_shot i... | https://langchain.readthedocs.io/en/latest/_modules/langchain/prompts/loading.html |
52aa80b12a88-1 | if template_path.suffix == ".txt":
with open(template_path) as f:
template = f.read()
else:
raise ValueError
# Set the template variable to the extracted variable.
config[var_name] = template
return config
def _load_examples(config: dict) -> dict:
... | https://langchain.readthedocs.io/en/latest/_modules/langchain/prompts/loading.html |
52aa80b12a88-2 | config = _load_template("suffix", config)
config = _load_template("prefix", config)
# Load the example prompt.
if "example_prompt_path" in config:
if "example_prompt" in config:
raise ValueError(
"Only one of example_prompt and example_prompt_path should "
... | https://langchain.readthedocs.io/en/latest/_modules/langchain/prompts/loading.html |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.