NickNYU committed
Commit · bd59653
Parent(s): 0dc2eca

[bugfix] Fix the answer cut-off caused by the LLM completion-token limit (the openai Python library defaults to 256 tokens), by setting temperature to 0 and switching the LLM response mode from compact-refine to refine.
Browse files
- .gitignore +1 -0
- app.py +4 -3
- core/__pycache__/lifecycle.cpython-310.pyc +0 -0
- core/test_lifecycle.py +0 -3
- dataset/docstore.json +0 -0
- dataset/index_store.json +1 -1
- dataset/vector_store.json +0 -0
- langchain_manager/manager.py +5 -0
- llama/service_context.py +22 -64
- llama/storage_context.py +67 -0
- requirements.txt +2 -1
- xpipe_wiki/manager_factory.py +2 -2
- xpipe_wiki/robot_manager.py +9 -3
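Taken together, the commit turns three knobs: temperature=0.0 and max_tokens=2048 on the LangChain OpenAI LLM, and ResponseMode.REFINE on the llama_index query engine. A minimal sketch of how the pieces combine, assuming the llama_index/LangChain APIs this repo already uses and a persisted index under ./dataset (variable names are illustrative, not from the commit):

    from langchain.llms import OpenAI
    from llama_index import (
        LLMPredictor,
        ServiceContext,
        StorageContext,
        load_index_from_storage,
    )
    from llama_index.indices.response import ResponseMode

    llm = OpenAI(
        model="text-davinci-003",
        client=None,
        temperature=0.0,   # deterministic answers for a wiki robot
        max_tokens=2048,   # openai.py defaults to 256, which truncates long answers
    )
    service_context = ServiceContext.from_defaults(llm_predictor=LLMPredictor(llm=llm))
    storage_context = StorageContext.from_defaults(persist_dir="./dataset")
    index = load_index_from_storage(storage_context, service_context=service_context)
    query_engine = index.as_query_engine(
        service_context=service_context,
        response_mode=ResponseMode.REFINE,  # refine instead of the default compact-refine
    )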
.gitignore
CHANGED

@@ -56,6 +56,7 @@ coverage.xml
 .hypothesis/
 .pytest_cache/
 .ruff_cache
+wandb/
 
 # Translations
 *.mo
app.py
CHANGED

@@ -9,9 +9,9 @@ from xpipe_wiki.manager_factory import XPipeRobotManagerFactory, XPipeRobotRevision
 logging.basicConfig(
     stream=sys.stdout, level=logging.INFO
 )  # logging.DEBUG for more verbose output
-logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
+# logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
 
-# Sidebar contents
+# # Sidebar contents
 with st.sidebar:
     st.title("🤗💬 LLM Chat App")
     st.markdown(

@@ -29,8 +29,9 @@ with st.sidebar:
 
 def main() -> None:
     st.header("X-Pipe Wiki 机器人 💬")
+
     robot_manager = XPipeRobotManagerFactory.get_or_create(
-        XPipeRobotRevision.
+        XPipeRobotRevision.SIMPLE_OPENAI_VERSION_0
     )
     robot = robot_manager.get_robot()
     query = st.text_input("X-Pipe Wiki 问题:")
core/__pycache__/lifecycle.cpython-310.pyc
CHANGED

Binary files a/core/__pycache__/lifecycle.cpython-310.pyc and b/core/__pycache__/lifecycle.cpython-310.pyc differ
core/test_lifecycle.py
CHANGED

@@ -1,10 +1,7 @@
-import logging
 from unittest import TestCase
 
 from core.lifecycle import Lifecycle
 
-logging.basicConfig()
-
 
 class SubLifecycle(Lifecycle):
     def __init__(self) -> None:
dataset/docstore.json
CHANGED

The diff for this file is too large to render. See raw diff.
dataset/index_store.json
CHANGED

@@ -1 +1 @@
-{"index_store/data": {"
+{"index_store/data": {"da495c94-4541-47e1-b93f-8535192a5f28": {"__type__": "vector_store", "__data__": "{\"index_id\": \"da495c94-4541-47e1-b93f-8535192a5f28\", \"summary\": null, \"nodes_dict\": {\"59108663-a5e1-4e3e-bb21-626158eef136\": \"59108663-a5e1-4e3e-bb21-626158eef136\", \"50de4ec9-febb-466f-9f9a-cc9296895e83\": \"50de4ec9-febb-466f-9f9a-cc9296895e83\", \"aa413a53-0dda-4ac4-8ae9-6e8e340bb4f0\": \"aa413a53-0dda-4ac4-8ae9-6e8e340bb4f0\", \"a0cc4323-ec8f-4fed-9401-e44125134341\": \"a0cc4323-ec8f-4fed-9401-e44125134341\", \"5321cc7b-2a86-48b8-b56c-415dde7c149b\": \"5321cc7b-2a86-48b8-b56c-415dde7c149b\", \"9e19fb91-8258-4aca-9692-2d027073499e\": \"9e19fb91-8258-4aca-9692-2d027073499e\", \"02e856e5-4211-4a27-9204-e966907f1d74\": \"02e856e5-4211-4a27-9204-e966907f1d74\", \"f3074870-8fbf-4322-b1d2-2111e6aac9af\": \"f3074870-8fbf-4322-b1d2-2111e6aac9af\", \"82677fb9-abe3-4038-8263-5576c47da4f2\": \"82677fb9-abe3-4038-8263-5576c47da4f2\", \"a08364a6-c23d-4df5-8b5d-84137fbebd4e\": \"a08364a6-c23d-4df5-8b5d-84137fbebd4e\", \"e45b082d-c3ec-45aa-b630-6db49a62728b\": \"e45b082d-c3ec-45aa-b630-6db49a62728b\", \"2c55445c-04b1-4705-9871-adaa02f38f1b\": \"2c55445c-04b1-4705-9871-adaa02f38f1b\", \"d0de9736-ccad-450e-b4a1-49d4cdb8b941\": \"d0de9736-ccad-450e-b4a1-49d4cdb8b941\", \"fd0d2375-39e2-4bce-8e39-1182a122a1b4\": \"fd0d2375-39e2-4bce-8e39-1182a122a1b4\", \"13221de7-6c68-4367-b1be-f35b06fc3a74\": \"13221de7-6c68-4367-b1be-f35b06fc3a74\", \"9f448401-cda9-4b5f-9a80-c79e111f9963\": \"9f448401-cda9-4b5f-9a80-c79e111f9963\", \"3bc7dfc2-3ddf-4384-a60c-6cd52e1314f4\": \"3bc7dfc2-3ddf-4384-a60c-6cd52e1314f4\", \"ce3e530c-ce2d-4f5f-a171-72a790c3c624\": \"ce3e530c-ce2d-4f5f-a171-72a790c3c624\", \"85f764bd-e560-48ba-a51e-2287b6fe19db\": \"85f764bd-e560-48ba-a51e-2287b6fe19db\", \"3a8e4c7c-9f7d-4735-93e7-9d847cff98de\": \"3a8e4c7c-9f7d-4735-93e7-9d847cff98de\", \"af881b61-03f4-4851-8946-794015e3436c\": \"af881b61-03f4-4851-8946-794015e3436c\", \"31579820-439e-4029-b8c4-a0d6528daa59\": \"31579820-439e-4029-b8c4-a0d6528daa59\"}, \"doc_id_dict\": {}, \"embeddings_dict\": {}}"}}}
dataset/vector_store.json
CHANGED

The diff for this file is too large to render. See raw diff.
langchain_manager/manager.py
CHANGED

@@ -28,6 +28,11 @@ class LangChainAzureManager(BaseLangChainManager):
             # model_name="text-davinci-003",
             model="text-davinci-003",
             client=None,
+            # temperature is set to 0.0 (default 0.7) to get deterministic
+            # answers from OpenAI; as a wiki robot we don't want variable answers
+            temperature=0.0,
+            # GPT-3 allows up to 4096 tokens, but openai.py defaults to 256
+            max_tokens=2048,
         )
 
     # Override
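Why max_tokens matters here: with the openai library's implicit default of 256 completion tokens, a long answer stops mid-sentence and the API flags it with finish_reason "length". A hedged sketch against the legacy pre-1.0 openai Completion API this stack uses (the prompt text is illustrative):

    import openai

    resp = openai.Completion.create(
        model="text-davinci-003",
        prompt="Summarize the X-Pipe wiki page on pipeline scheduling.",
        temperature=0.0,
        # without this, max_tokens silently defaults to 256 and long answers
        # come back truncated, with finish_reason == "length"
        max_tokens=2048,
    )
    print(resp["choices"][0]["finish_reason"], resp["choices"][0]["text"])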
llama/service_context.py
CHANGED

@@ -1,13 +1,26 @@
 from abc import abstractmethod, ABC
 
-from llama_index import ServiceContext, LLMPredictor, LangchainEmbedding
-from llama_index import StorageContext
-from typing import List
+from llama_index import ServiceContext, LLMPredictor, LangchainEmbedding
 
 from core.lifecycle import Lifecycle
 from langchain_manager.manager import BaseLangChainManager
 
 
+# def get_callback_manager() -> CallbackManager:
+#     from llama_index.callbacks import (
+#         WandbCallbackHandler,
+#         CallbackManager,
+#         LlamaDebugHandler,
+#     )
+#     llama_debug = LlamaDebugHandler(print_trace_on_end=True)
+#     # wandb.init args
+#     run_args = dict(
+#         project="llamaindex",
+#     )
+#     wandb_callback = WandbCallbackHandler(run_args=run_args)
+#     return CallbackManager([llama_debug, wandb_callback])
+
+
 class ServiceContextManager(Lifecycle, ABC):
     @abstractmethod
     def get_service_context(self) -> ServiceContext:

@@ -36,7 +49,9 @@ class AzureServiceContextManager(ServiceContextManager):
         llm_predictor = LLMPredictor(llm=self.lc_manager.get_llm())
         # configure service context
         self.service_context = ServiceContext.from_defaults(
-            llm_predictor=llm_predictor,
+            llm_predictor=llm_predictor,
+            embed_model=embedding,
+            # callback_manager=get_callback_manager(),
         )
 
     def do_start(self) -> None:

@@ -95,7 +110,9 @@ class HuggingFaceChineseOptServiceContextManager(ServiceContextManager):
         llm_predictor = LLMPredictor(self.lc_manager.get_llm())
         # configure service context
         self.service_context = ServiceContext.from_defaults(
-            llm_predictor=llm_predictor,
+            llm_predictor=llm_predictor,
+            embed_model=embedding,
+            # callback_manager=get_callback_manager()
         )
 
     def do_start(self) -> None:

@@ -123,62 +140,3 @@ class HuggingFaceChineseOptServiceContextManager(ServiceContextManager):
             "[do_dispose] total used token: %d",
             self.service_context.llm_predictor.total_tokens_used,
         )
-
-
-class StorageContextManager(Lifecycle, ABC):
-    @abstractmethod
-    def get_storage_context(self) -> StorageContext:
-        pass
-
-
-class LocalStorageContextManager(StorageContextManager):
-    storage_context: StorageContext
-
-    def __init__(
-        self,
-        service_context_manager: ServiceContextManager,
-        dataset_path: str = "./dataset",
-    ) -> None:
-        super().__init__()
-        self.dataset_path = dataset_path
-        self.service_context_manager = service_context_manager
-
-    def get_storage_context(self) -> StorageContext:
-        return self.storage_context
-
-    def do_init(self) -> None:
-        from llama.utils import is_local_storage_files_ready
-
-        if is_local_storage_files_ready(self.dataset_path):
-            self.storage_context = StorageContext.from_defaults(
-                persist_dir=self.dataset_path
-            )
-        else:
-            docs = self._download()
-            self._indexing(docs)
-
-    def do_start(self) -> None:
-        # self.logger.info("[do_start]%", **self.storage_context.to_dict())
-        pass
-
-    def do_stop(self) -> None:
-        # self.logger.info("[do_stop]%", **self.storage_context.to_dict())
-        pass
-
-    def do_dispose(self) -> None:
-        self.storage_context.persist(self.dataset_path)
-
-    def _download(self) -> List[Document]:
-        from llama.data_loader import GithubLoader
-
-        loader = GithubLoader()
-        return loader.load()
-
-    def _indexing(self, docs: List[Document]) -> None:
-        from llama_index import GPTVectorStoreIndex
-
-        index = GPTVectorStoreIndex.from_documents(
-            docs, service_context=self.service_context_manager.get_service_context()
-        )
-        index.storage_context.persist(persist_dir=self.dataset_path)
-        self.storage_context = index.storage_context
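Both from_defaults calls now pass embed_model explicitly; the embedding variable is built earlier in the same method, outside the rendered hunk. A hypothetical reconstruction of that elided context, assuming the manager exposes its embedding model the same way it exposes its LLM:

    # hypothetical: the hunk's `embedding` presumably comes from something like
    embedding = LangchainEmbedding(self.lc_manager.get_embedding())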
llama/storage_context.py
CHANGED

@@ -0,0 +1,67 @@
+from llama_index import StorageContext
+from typing import List
+from abc import abstractmethod, ABC
+
+from llama_index import Document
+
+from core.lifecycle import Lifecycle
+from llama.service_context import ServiceContextManager
+
+
+class StorageContextManager(Lifecycle, ABC):
+    @abstractmethod
+    def get_storage_context(self) -> StorageContext:
+        pass
+
+
+class LocalStorageContextManager(StorageContextManager):
+    storage_context: StorageContext
+
+    def __init__(
+        self,
+        service_context_manager: ServiceContextManager,
+        dataset_path: str = "./dataset",
+    ) -> None:
+        super().__init__()
+        self.dataset_path = dataset_path
+        self.service_context_manager = service_context_manager
+
+    def get_storage_context(self) -> StorageContext:
+        return self.storage_context
+
+    def do_init(self) -> None:
+        from llama.utils import is_local_storage_files_ready
+
+        if is_local_storage_files_ready(self.dataset_path):
+            self.storage_context = StorageContext.from_defaults(
+                persist_dir=self.dataset_path
+            )
+        else:
+            docs = self._download()
+            self._indexing(docs)
+
+    def do_start(self) -> None:
+        # self.logger.info("[do_start]%", **self.storage_context.to_dict())
+        pass
+
+    def do_stop(self) -> None:
+        # self.logger.info("[do_stop]%", **self.storage_context.to_dict())
+        pass
+
+    def do_dispose(self) -> None:
+        self.storage_context.persist(self.dataset_path)
+
+    def _download(self) -> List[Document]:
+        from llama.data_loader import GithubLoader
+
+        loader = GithubLoader()
+        return loader.load()
+
+    def _indexing(self, docs: List[Document]) -> None:
+        from llama_index import GPTVectorStoreIndex
+
+        index = GPTVectorStoreIndex.from_documents(
+            docs, service_context=self.service_context_manager.get_service_context()
+        )
+        index.storage_context.persist(persist_dir=self.dataset_path)
+        self.storage_context = index.storage_context
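The extracted manager keeps the same load-or-build pattern: on init it loads a persisted index from dataset_path if one exists, otherwise it downloads the wiki documents and indexes them; on dispose it persists the storage back. A minimal usage sketch; in the real factory the Lifecycle machinery drives these phases, here the do_* hooks are called directly for illustration:

    from langchain_manager.manager import LangChainAzureManager
    from llama.service_context import AzureServiceContextManager
    from llama.storage_context import LocalStorageContextManager

    service_ctx_mgr = AzureServiceContextManager(lc_manager=LangChainAzureManager())
    service_ctx_mgr.do_init()  # builds the ServiceContext (LLM + embeddings)

    storage_ctx_mgr = LocalStorageContextManager(
        service_context_manager=service_ctx_mgr,
        dataset_path="./dataset",
    )
    storage_ctx_mgr.do_init()     # loads ./dataset if present, else downloads + indexes
    ctx = storage_ctx_mgr.get_storage_context()
    storage_ctx_mgr.do_dispose()  # persists the storage context back to ./dataset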
requirements.txt
CHANGED

@@ -6,4 +6,5 @@ black
 mypy
 accelerate
 python-dotenv
-sentence_transformers
+sentence_transformers
+wandb
xpipe_wiki/manager_factory.py
CHANGED

@@ -42,7 +42,7 @@ class XPipeRobotManagerFactory:
         service_context_manager = AzureServiceContextManager(
             lc_manager=LangChainAzureManager()
         )
-        from llama.
+        from llama.storage_context import LocalStorageContextManager
 
         dataset_path = os.getenv("XPIPE_WIKI_DATASET_PATH", "./dataset")
         storage_context_manager = LocalStorageContextManager(

@@ -66,7 +66,7 @@ class XPipeRobotManagerFactory:
             lc_manager=LangChainAzureManager()
         )
 
-        from llama.
+        from llama.storage_context import LocalStorageContextManager
 
         dataset_path = os.getenv("XPIPE_WIKI_DATASET_PATH", "./dataset")
         storage_context_manager = LocalStorageContextManager(
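The import stays function-local rather than module-level: llama.storage_context itself imports ServiceContextManager from llama.service_context, so deferring the import inside the factory method presumably avoids a circular dependency at module load time.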
xpipe_wiki/robot_manager.py
CHANGED

@@ -3,10 +3,12 @@ from typing import Any
 
 from llama_index import load_index_from_storage
 from llama_index.indices.query.base import BaseQueryEngine
+from llama_index.indices.response import ResponseMode
 
 from core.helper import LifecycleHelper
 from core.lifecycle import Lifecycle
-from llama.service_context import ServiceContextManager
+from llama.service_context import ServiceContextManager
+from llama.storage_context import StorageContextManager
 
 
 class XPipeWikiRobot(ABC):

@@ -23,7 +25,10 @@ class AzureOpenAIXPipeWikiRobot(XPipeWikiRobot):
         self.query_engine = query_engine
 
     def ask(self, question: str) -> Any:
-
+        print("question: ", question)
+        response = self.query_engine.query(question)
+        print("response type: ", type(response))
+        return response.__str__()
 
 
 class XPipeWikiRobotManager(Lifecycle):

@@ -61,7 +66,8 @@ class AzureXPipeWikiRobotManager(XPipeWikiRobotManager):
             service_context=self.service_context_manager.get_service_context(),
         )
         self.query_engine = index.as_query_engine(
-            service_context=self.service_context_manager.get_service_context()
+            service_context=self.service_context_manager.get_service_context(),
+            response_mode=ResponseMode.REFINE,
         )
 
     def do_stop(self) -> None:
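On the response mode: REFINE makes one LLM call per retrieved chunk and sequentially refines the answer, while the compact variant packs as many chunks as possible into each prompt; the commit message attributes the truncation to the compact-refine path combined with the 256-token completion default. A hedged sketch of the two engine configurations (index and service context construction omitted; names are illustrative):

    from llama_index.indices.response import ResponseMode

    # assumes `index` and `service_context` built as in llama/service_context.py
    compact_engine = index.as_query_engine(
        service_context=service_context,
        response_mode=ResponseMode.COMPACT,  # pack many chunks into each LLM call
    )
    refine_engine = index.as_query_engine(
        service_context=service_context,
        response_mode=ResponseMode.REFINE,   # one refinement call per chunk
    )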