import os
import json
import numpy as np
import gradio as gr
from huggingface_hub import InferenceClient
# --- Configuration ---
KNOWLEDGE_BASE_DIR = "knowledge_base"
# --- Step 1: Load documents ---
def load_documents():
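    """Read every .txt file in the knowledge base directory.

    Returns a pair of parallel lists: the non-empty file contents and
    the matching filenames, used later as source labels for each chunk.
    """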
documents = []
filenames = []
for filename in os.listdir(KNOWLEDGE_BASE_DIR):
if filename.endswith(".txt"):
filepath = os.path.join(KNOWLEDGE_BASE_DIR, filename)
with open(filepath, "r", encoding="utf-8", errors="ignore") as f:
content = f.read().strip()
if content:
documents.append(content)
filenames.append(filename)
return documents, filenames
# --- Step 2: Chunk documents ---
def chunk_text(text, chunk_size=500, overlap=100):
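    """Split text into overlapping character windows.

    Each window is chunk_size characters long and the start advances by
    chunk_size - overlap (400 by default), so consecutive chunks share
    100 characters of context across the boundary.
    """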
chunks = []
start = 0
while start < len(text):
end = start + chunk_size
chunks.append(text[start:end])
start += chunk_size - overlap
return chunks
# --- Step 3: Get embeddings via HF API ---
def get_embeddings(texts, client):
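    """Embed each text via the HF Inference API (all-MiniLM-L6-v2).

    The API may return per-token vectors (a 2-D array); in that case we
    mean-pool over tokens to get a single 384-dim sentence vector.
    """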
embeddings = []
for text in texts:
response = client.feature_extraction(text, model="sentence-transformers/all-MiniLM-L6-v2")
emb = np.array(response)
if emb.ndim == 2:
emb = emb.mean(axis=0)
embeddings.append(emb)
return np.array(embeddings)
# --- Step 4: Simple vector search with numpy ---
def cosine_similarity(a, b):
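    """Row-wise cosine similarity between two arrays.

    Both inputs are L2-normalized first; the 1e-10 epsilon guards
    against division by zero for all-zero vectors.
    """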
a_norm = a / (np.linalg.norm(a, axis=-1, keepdims=True) + 1e-10)
b_norm = b / (np.linalg.norm(b, axis=-1, keepdims=True) + 1e-10)
return np.dot(a_norm, b_norm.T)
class SimpleVectorStore:
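    """Minimal in-memory vector store: chunks, their source filenames,
    and one numpy matrix of embeddings. No persistence and no index;
    search is a brute-force cosine scan, which is fine at this scale.
    """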
def __init__(self):
self.chunks = []
self.sources = []
self.embeddings = None
def add(self, chunks, sources, embeddings):
self.chunks = chunks
self.sources = sources
self.embeddings = embeddings
def search(self, query_embedding, top_k=3):
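        """Return the top_k (chunk, source, score) triples for a query.

        np.argsort is ascending, so we take the last top_k indices and
        reverse them to get scores in descending order.
        """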
scores = cosine_similarity(query_embedding.reshape(1, -1), self.embeddings)[0]
top_indices = np.argsort(scores)[-top_k:][::-1]
results = [(self.chunks[i], self.sources[i], float(scores[i])) for i in top_indices]
return results
# --- Step 5: Build the knowledge store ---
def build_store(documents, filenames, client):
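    """Chunk every document, embed all chunks, and fill a vector store.

    Each chunk keeps the filename it came from so answers can be traced
    back to a source document.
    """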
all_chunks = []
all_sources = []
for doc, fname in zip(documents, filenames):
chunks = chunk_text(doc)
for chunk in chunks:
all_chunks.append(chunk)
all_sources.append(fname)
print(f"Embedding {len(all_chunks)} chunks via API...")
embeddings = get_embeddings(all_chunks, client)
print("Embeddings complete.")
store = SimpleVectorStore()
store.add(all_chunks, all_sources, embeddings)
return store
# --- Step 6: RAG query ---
def query_rag(question, store, client):
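    """Answer a question with retrieval-augmented generation.

    Embeds the question, retrieves the 3 most similar chunks, packs
    them into the system prompt as context, and asks the chat model to
    answer from that context only.
    """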
q_emb = get_embeddings([question], client)
results = store.search(q_emb[0], top_k=3)
context = "\n\n".join([chunk for chunk, src, score in results])
system_prompt = f"""You are an AI Twin that represents a person. Use ONLY the following context to answer the question.
If you don't know the answer from the context, say "I don't have that information in my profile."
Context:
{context}"""
try:
response = client.chat_completion(
messages=[
{"role": "system", "content": system_prompt},
{"role": "user", "content": question}
],
model="meta-llama/Meta-Llama-3-8B-Instruct",
max_tokens=512,
temperature=0.3,
)
return response.choices[0].message.content.strip()
except Exception as e:
return f"Error: {str(e)}"
# --- Initialization ---
print("Starting AI Twin...")
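# The token is read from the Space's secrets; it may be None, in which
# case requests go out unauthenticated and hit stricter rate limits.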
hf_token = os.environ.get("HUGGINGFACEHUB_API_TOKEN", None)
hf_client = InferenceClient(token=hf_token)
docs, fnames = load_documents()
print(f"Loaded {len(docs)} documents: {fnames}")
vector_store = build_store(docs, fnames, hf_client)
print("Ready!")
# --- UI ---
def load_profile():
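    """Load profile.txt from the knowledge base for the sidebar panel."""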
try:
with open(os.path.join(KNOWLEDGE_BASE_DIR, "profile.txt"), "r", encoding="utf-8") as f:
return f.read()
    except (OSError, UnicodeDecodeError):
return "Profile not found."
def respond(message, chat_history):
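    """Gradio handler: append the user turn, then the RAG answer.

    chat_history uses the openai-style messages format (dicts with
    "role" and "content"), matching the Chatbot's type="messages".
    """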
if not message:
return "", chat_history
if chat_history is None:
chat_history = []
chat_history.append({"role": "user", "content": message})
try:
answer = query_rag(message, vector_store, hf_client)
chat_history.append({"role": "assistant", "content": answer})
except Exception as e:
chat_history.append({"role": "assistant", "content": f"Error: {str(e)}"})
return "", chat_history
with gr.Blocks(title="My AI Twin", theme=gr.themes.Soft()) as demo:
gr.Markdown("# 🤖 My AI Twin")
gr.Markdown("Ask me anything about my professional background, skills, and projects!")
with gr.Row():
with gr.Column(scale=1):
gr.Markdown("### 📋 Profile Summary")
gr.Textbox(value=load_profile(), label="About Me", interactive=False, lines=15)
with gr.Column(scale=2):
chatbot = gr.Chatbot(label="Conversation", height=400, type="messages")
msg = gr.Textbox(label="Ask a question", placeholder="e.g. What are my skills?")
with gr.Row():
submit_btn = gr.Button("Submit", variant="primary")
clear = gr.Button("Clear")
msg.submit(respond, [msg, chatbot], [msg, chatbot])
submit_btn.click(respond, [msg, chatbot], [msg, chatbot])
clear.click(lambda: None, None, chatbot, queue=False)
if __name__ == "__main__":
demo.launch()