al1kss committed
Commit · 930b9bd · 1 Parent(s): 6882065
fix: apply automatic ruff formatting
examples/unofficial-sample/lightrag_cloudflare_demo.py
CHANGED
@@ -4,13 +4,10 @@ import inspect
 import logging
 import logging.config
 from lightrag import LightRAG, QueryParam
-from lightrag.llm.ollama import ollama_model_complete, ollama_embed
 from lightrag.utils import EmbeddingFunc, logger, set_verbose_debug
 from lightrag.kg.shared_storage import initialize_pipeline_status

 import requests
-import json
-from functools import partial
 import numpy as np
 from dotenv import load_dotenv

@@ -21,29 +18,32 @@ load_dotenv(dotenv_path=".env", override=False)


 """ ----========= IMPORTANT CHANGE THIS! =========---- """
-cloudflare_api_key =
-account_id =
+cloudflare_api_key = "YOUR_API_KEY"
+account_id = "YOUR_ACCOUNT ID"  # This is unique to your Cloudflare account

 # Automatically derived from account_id
 api_base_url = f"https://api.cloudflare.com/client/v4/accounts/{account_id}/ai/run/"


 # choose an embedding model
-EMBEDDING_MODEL =
+EMBEDDING_MODEL = "@cf/baai/bge-m3"
 # choose a generative model
 LLM_MODEL = "@cf/meta/llama-3.2-3b-instruct"

-WORKING_DIR = "../dickens"
+WORKING_DIR = "../dickens"  # you can change output as desired
+

 # Cloudflare init
 class CloudflareWorker:
-    def __init__(
-
-
-
-
-
-
+    def __init__(
+        self,
+        cloudflare_api_key: str,
+        api_base_url: str,
+        llm_model_name: str,
+        embedding_model_name: str,
+        max_tokens: int = 4080,
+        max_response_tokens: int = 4080,
+    ):
         self.cloudflare_api_key = cloudflare_api_key
         self.api_base_url = api_base_url
         self.llm_model_name = llm_model_name
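For reference, the request method below builds its endpoint by concatenating api_base_url with a model name. A minimal sketch of the URL this configuration produces, using the placeholder account id from this hunk:

account_id = "YOUR_ACCOUNT_ID"  # placeholder, as in the diff
api_base_url = f"https://api.cloudflare.com/client/v4/accounts/{account_id}/ai/run/"
LLM_MODEL = "@cf/meta/llama-3.2-3b-instruct"

# _send_request() appends the model name to the base URL:
print(f"{api_base_url}{LLM_MODEL}")
# https://api.cloudflare.com/client/v4/accounts/YOUR_ACCOUNT_ID/ai/run/@cf/meta/llama-3.2-3b-instruct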
@@ -54,23 +54,21 @@ class CloudflareWorker:
     async def _send_request(self, model_name: str, input_: dict, debug_log: str):
         headers = {"Authorization": f"Bearer {self.cloudflare_api_key}"}

-        print(f
+        print(f"""
 data sent to Cloudflare
 ~~~~~~~~~~~
 {debug_log}
-
+""")

         try:
             response_raw = requests.post(
-                f"{self.api_base_url}{model_name}",
-                headers=headers,
-                json=input_
+                f"{self.api_base_url}{model_name}", headers=headers, json=input_
             ).json()
-            print(f
+            print(f"""
 Cloudflare worker responded with:
 ~~~~~~~~~~~
 {str(response_raw)}
-
+""")
             result = response_raw.get("result", {})

             if "data" in result:  # Embedding case
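The branching on result reflects the two response shapes the demo expects from Cloudflare: embeddings arrive under result["data"], while generated text (by assumption here, the key is not visible in this hunk) arrives under result["response"]. A self-contained sketch of that dispatch:

def parse_result(response_raw: dict):
    # Shapes assumed from the demo's branching; not verified against
    # current Cloudflare documentation.
    result = response_raw.get("result", {})
    if "data" in result:  # embedding case
        return result["data"]
    if "response" in result:  # text-generation case (assumed key)
        return result["response"]
    raise ValueError("Unexpected Cloudflare response format")

parse_result({"result": {"response": "hello"}})   # -> "hello"
parse_result({"result": {"data": [[0.1, 0.2]]}})  # -> [[0.1, 0.2]]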
@@ -82,22 +80,21 @@ class CloudflareWorker:
                 raise ValueError("Unexpected Cloudflare response format")

         except Exception as e:
-            print(f
+            print(f"""
 Cloudflare API returned:
 ~~~~~~~~~
 Error: {e}
-
+""")
             input("Press Enter to continue...")
             return None

-    async def query(self, prompt, system_prompt: str =
-
+    async def query(self, prompt, system_prompt: str = "", **kwargs) -> str:
         # no caching is used here and we don't want to interfere with the rest of lightrag, so pop this kwarg
         kwargs.pop("hashing_kv", None)

         message = [
             {"role": "system", "content": system_prompt},
-            {"role": "user", "content": prompt}
+            {"role": "user", "content": prompt},
         ]

         input_ = {
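With the restored signature, query() takes a prompt, an optional system prompt, and arbitrary kwargs (of which only hashing_kv is stripped). A usage sketch, assuming the demo's module-level names and valid credentials; the prompt text is illustrative:

import asyncio

async def demo() -> None:
    worker = CloudflareWorker(
        cloudflare_api_key="YOUR_API_KEY",
        api_base_url=api_base_url,
        llm_model_name=LLM_MODEL,
        embedding_model_name=EMBEDDING_MODEL,
    )
    answer = await worker.query(
        "Summarize the inserted book in two sentences.",
        system_prompt="You are a concise literary assistant.",
    )
    print(answer)

# asyncio.run(demo())  # uncomment once the credentials above are set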
@@ -109,15 +106,15 @@ class CloudflareWorker:
         return await self._send_request(
             self.llm_model_name,
             input_,
-            debug_log=f"\n- model used {self.llm_model_name}\n- system prompt: {system_prompt}\n- query: {prompt}"
+            debug_log=f"\n- model used {self.llm_model_name}\n- system prompt: {system_prompt}\n- query: {prompt}",
         )

     async def embedding_chunk(self, texts: list[str]) -> np.ndarray:
-        print(f
+        print(f"""
 TEXT inputted
 ~~~~~
 {texts}
-
+""")

         input_ = {
             "text": texts,
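embedding_chunk() is the vector counterpart: it posts the whole list of texts in one request and is annotated to return an np.ndarray. A usage sketch, assuming an initialized worker as above:

async def embed_demo() -> None:
    texts = ["first chunk", "second chunk"]
    vectors = await cloudflare_worker.embedding_chunk(texts)
    # one row per input text; the width depends on the embedding model
    print(len(texts), type(vectors))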
@@ -128,12 +125,10 @@ class CloudflareWorker:
         return await self._send_request(
             self.embedding_model_name,
             input_,
-            debug_log=f"\n-llm model name {self.embedding_model_name}\n- texts: {texts}"
+            debug_log=f"\n-llm model name {self.embedding_model_name}\n- texts: {texts}",
         )


-
-
 def configure_logging():
     """Configure logging for the application"""

@@ -145,7 +140,9 @@ def configure_logging():

     # Get log directory path from environment variable or use current directory
     log_dir = os.getenv("LOG_DIR", os.getcwd())
-    log_file_path = os.path.abspath(os.path.join(log_dir, "lightrag_cloudflare_worker_demo.log"))
+    log_file_path = os.path.abspath(
+        os.path.join(log_dir, "lightrag_cloudflare_worker_demo.log")
+    )

     print(f"\nLightRAG compatible demo log file: {log_file_path}\n")
     os.makedirs(os.path.dirname(log_file_path), exist_ok=True)
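The reflowed path logic is behavior-preserving: with LOG_DIR unset it resolves to the current working directory. A standalone sketch:

import os

log_dir = os.getenv("LOG_DIR", os.getcwd())
log_file_path = os.path.abspath(
    os.path.join(log_dir, "lightrag_cloudflare_worker_demo.log")
)
print(log_file_path)  # e.g. /path/to/cwd/lightrag_cloudflare_worker_demo.log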
@@ -203,10 +200,10 @@ if not os.path.exists(WORKING_DIR):

 async def initialize_rag():
     cloudflare_worker = CloudflareWorker(
-        cloudflare_api_key
-        api_base_url
-        embedding_model_name
-        llm_model_name
+        cloudflare_api_key=cloudflare_api_key,
+        api_base_url=api_base_url,
+        embedding_model_name=EMBEDDING_MODEL,
+        llm_model_name=LLM_MODEL,
     )

     rag = LightRAG(
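The body of LightRAG(...) lies outside this hunk; a hedged sketch of how the worker's methods typically plug into it (parameter names follow LightRAG's public constructor and its EmbeddingFunc wrapper; the embedding_dim of 1024 is an assumption for @cf/baai/bge-m3, not taken from the diff):

async def initialize_rag_sketch():
    rag = LightRAG(
        working_dir=WORKING_DIR,
        llm_model_func=cloudflare_worker.query,
        embedding_func=EmbeddingFunc(
            embedding_dim=1024,  # assumed for @cf/baai/bge-m3
            func=cloudflare_worker.embedding_chunk,
        ),
    )
    await rag.initialize_storages()
    await initialize_pipeline_status()
    return rag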
@@ -269,7 +266,7 @@ async def main():

     # Location of the content to be added to the knowledge base
     # Several files can be added at once by modifying this code
-    with open("./book.txt", "r", encoding="utf-8") as f:
+    with open("./book.txt", "r", encoding="utf-8") as f:
         await rag.ainsert(f.read())

     # Perform naive search
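After ainsert, the "naive search" in the next context line goes through rag.aquery with a QueryParam (both imported at the top of the file); a sketch, with an illustrative question:

resp = await rag.aquery(
    "What are the top themes in this story?",
    param=QueryParam(mode="naive"),
)
print(resp)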
@@ -324,8 +321,6 @@ async def main():
         else:
             print(resp)

-
-
 """ FOR TESTING (if you want to test right away after building): uncomment this part """

 """