import json
import os
import tempfile

import streamlit as st
from dotenv import load_dotenv

from langchain.chains import ConversationalRetrievalChain
from langchain.docstore.document import Document
from langchain.memory import ConversationBufferMemory
from langchain_community.document_loaders.csv_loader import CSVLoader
from langchain_community.document_loaders.pdf import PyPDFLoader
from langchain_community.document_loaders.text import TextLoader
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_groq import ChatGroq
from langchain_text_splitters import RecursiveCharacterTextSplitter

from htmlTemplates import css, bot_template, user_template
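
# Assumed (not pinned in this file) runtime dependencies for the imports
# above: streamlit, python-dotenv, langchain, langchain-community,
# langchain-text-splitters, langchain-groq, faiss-cpu (or faiss-gpu),
# sentence-transformers, and pypdf (used by PyPDFLoader).
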
def get_pdf_text(pdf_docs):
    """Load one uploaded PDF into a list of LangChain Documents."""
    # Streamlit uploads live in memory; write to a temp path so PyPDFLoader
    # can read from disk.
    temp_dir = tempfile.TemporaryDirectory()
    temp_filepath = os.path.join(temp_dir.name, pdf_docs.name)
    with open(temp_filepath, "wb") as f:
        f.write(pdf_docs.getvalue())
    pdf_loader = PyPDFLoader(temp_filepath)
    pdf_doc = pdf_loader.load()
    return pdf_doc
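
# NOTE: PyPDFLoader yields one Document per page, with "source" and "page"
# recorded in each Document's metadata, so answers can be traced to pages.
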
def get_text_file(docs):
    """Load one uploaded plain-text file into LangChain Documents."""
    temp_dir = tempfile.TemporaryDirectory()
    temp_filepath = os.path.join(temp_dir.name, docs.name)
    with open(temp_filepath, "wb") as f:
        f.write(docs.getvalue())

    text_loader = TextLoader(temp_filepath)
    text_doc = text_loader.load()
    return text_doc
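
# NOTE: TextLoader assumes a readable encoding; for uploads that are not
# UTF-8, TextLoader(temp_filepath, autodetect_encoding=True) is an option.
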
def get_csv_file(docs):
    """Load one uploaded CSV file into LangChain Documents."""
    temp_dir = tempfile.TemporaryDirectory()
    temp_filepath = os.path.join(temp_dir.name, docs.name)
    with open(temp_filepath, "wb") as f:
        f.write(docs.getvalue())

    csv_loader = CSVLoader(temp_filepath, encoding="utf8")
    csv_doc = csv_loader.load()
    return csv_doc
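
# NOTE: CSVLoader emits one Document per row, serialized as "column: value"
# lines, so each row becomes an independently retrievable chunk.
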
def get_json_file(file) -> list[Document]:
    """Flatten one uploaded JSON file into a list of LangChain Documents."""
    raw = file.getvalue().decode("utf-8", errors="ignore")
    data = json.loads(raw)

    docs = []

    def add_doc(x):
        docs.append(Document(page_content=json.dumps(x, ensure_ascii=False)))

    # Special case: a top-level {"scans": [...]} object whose scans carry
    # "relationships" lists; index each relationship as its own Document.
    if isinstance(data, dict) and "scans" in data and isinstance(data["scans"], list):
        for s in data["scans"]:
            rels = s.get("relationships", [])
            if isinstance(rels, list) and rels:
                for r in rels:
                    add_doc(r)
        if not docs:
            add_doc(data)
    elif isinstance(data, list):
        for item in data:
            add_doc(item)
    else:
        add_doc(data)

    return docs
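
# Illustrative input for the "scans" branch above (the field names inside
# each relationship are hypothetical; any JSON objects work):
# {"scans": [{"relationships": [{"source": "A", "target": "B"}, ...]}, ...]}
# Anything else falls back to one Document per top-level list item, or a
# single Document for the whole payload.
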
def get_text_chunks(documents):
    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=1000,
        chunk_overlap=200,
        length_function=len
    )
    documents = text_splitter.split_documents(documents)
    return documents
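
# NOTE: chunk_size and chunk_overlap are measured in characters
# (length_function=len); the 200-character overlap preserves context that
# would otherwise be lost at chunk boundaries. 1000/200 is a common default.
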
def get_vectorstore(text_chunks):
    embeddings = HuggingFaceEmbeddings(
        model_name='sentence-transformers/all-MiniLM-L12-v2',
        model_kwargs={'device': 'cpu'})
    vectorstore = FAISS.from_documents(text_chunks, embeddings)
    return vectorstore
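
# NOTE: the FAISS index lives in memory and is rebuilt on every "Process"
# click. To persist it between runs, vectorstore.save_local(path) and
# FAISS.load_local(path, embeddings) are available in langchain_community.
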
def get_conversation_chain(vectorstore):
    llm = ChatGroq(
        groq_api_key=os.environ.get("GROQ_API_KEY"),
        model_name="llama-3.1-8b-instant",
        temperature=0.75,
        max_tokens=512
    )

    memory = ConversationBufferMemory(
        memory_key="chat_history",
        return_messages=True
    )
    retriever = vectorstore.as_retriever(search_kwargs={"k": 3})

    conversation_chain = ConversationalRetrievalChain.from_llm(
        llm=llm,
        retriever=retriever,
        memory=memory,
    )
    return conversation_chain
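
# NOTE: ConversationalRetrievalChain first condenses the new question plus
# the buffered chat history into a standalone query, then retrieves the top
# k=3 chunks and asks the Groq-hosted Llama model to answer from them.
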
def handle_userinput(user_question):
    print('user_question => ', user_question)
    # Guard: nothing to query until documents have been processed.
    if st.session_state.conversation is None:
        st.warning("Please upload and process your documents first.")
        return

    response = st.session_state.conversation({'question': user_question})
    st.session_state.chat_history = response['chat_history']

    # Memory alternates human/AI messages, so even indices are the user's.
    for i, message in enumerate(st.session_state.chat_history):
        if i % 2 == 0:
            st.write(user_template.replace(
                "{{MSG}}", message.content), unsafe_allow_html=True)
        else:
            st.write(bot_template.replace(
                "{{MSG}}", message.content), unsafe_allow_html=True)
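
# NOTE: calling the chain directly, chain({'question': ...}), still works
# but is deprecated in newer LangChain releases; chain.invoke({...}) is the
# forward-compatible spelling.
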
def main():
    load_dotenv()
    st.set_page_config(page_title="Basic_RAG_AI_Chatbot_with_Llama",
                       page_icon=":books:")
    st.write(css, unsafe_allow_html=True)

    if "conversation" not in st.session_state:
        st.session_state.conversation = None
    if "chat_history" not in st.session_state:
        st.session_state.chat_history = None

    st.header("Basic_RAG_AI_Chatbot_with_Llama3 :books:")
    user_question = st.text_input("Ask a question about your documents:")
    if user_question:
        handle_userinput(user_question)

    with st.sidebar:
        st.subheader("Your documents")
        docs = st.file_uploader(
            "Upload your Files here and click on 'Process'", accept_multiple_files=True)
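        # NOTE: file_uploader accepts any file type here; passing
        # type=["pdf", "txt", "csv", "json"] would filter uploads up front.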
| if st.button("Process[PDF]"): |
| with st.spinner("Processing"): |
| |
| doc_list = [] |
| for file in docs: |
| print('file - type : ', file.type) |
| if file.type in ['application/octet-stream', 'application/pdf']: |
| |
| doc_list.extend(get_pdf_text(file)) |
| else: |
| st.error("PDF ํ์ผ์ด ์๋๋๋ค.") |
| if not doc_list: |
| st.error("์ฒ๋ฆฌ ๊ฐ๋ฅํ ๋ฌธ์๋ฅผ ์ฐพ์ง ๋ชปํ์ต๋๋ค.") |
| st.stop() |
|
|
| text_chunks = get_text_chunks(doc_list) |
| vectorstore = get_vectorstore(text_chunks) |
| st.session_state.conversation = get_conversation_chain(vectorstore) |
|
|
| |
| if st.button("Process[TXT]"): |
| with st.spinner("Processing"): |
| |
| doc_list = [] |
| for file in docs: |
| print('file - type : ', file.type) |
| if file.type == 'text/plain': |
| |
| doc_list.extend(get_text_file(file)) |
| else: |
| st.error("TXT ํ์ผ์ด ์๋๋๋ค.") |
| if not doc_list: |
| st.error("์ฒ๋ฆฌ ๊ฐ๋ฅํ ๋ฌธ์๋ฅผ ์ฐพ์ง ๋ชปํ์ต๋๋ค.") |
| st.stop() |
|
|
| text_chunks = get_text_chunks(doc_list) |
| vectorstore = get_vectorstore(text_chunks) |
| st.session_state.conversation = get_conversation_chain(vectorstore) |
| |
| |
| if st.button("Process[CSV]"): |
| with st.spinner("Processing"): |
| |
| doc_list = [] |
| for file in docs: |
| print('file - type : ', file.type) |
| if file.type == 'text/csv': |
| |
| doc_list.extend(get_csv_file(file)) |
| else: |
| st.error("CSV ํ์ผ์ด ์๋๋๋ค.") |
| if not doc_list: |
| st.error("์ฒ๋ฆฌ ๊ฐ๋ฅํ ๋ฌธ์๋ฅผ ์ฐพ์ง ๋ชปํ์ต๋๋ค.") |
| st.stop() |
|
|
| text_chunks = get_text_chunks(doc_list) |
| vectorstore = get_vectorstore(text_chunks) |
| st.session_state.conversation = get_conversation_chain(vectorstore) |
|
|
| |
| if st.button("Process[JSON]"): |
| with st.spinner("Processing"): |
| |
| doc_list = [] |
| for file in docs: |
| print('file - type : ', file.type) |
| if file.type == 'application/json': |
| |
| doc_list.extend(get_json_file(file)) |
| else: |
| st.error("JSON ํ์ผ์ด ์๋๋๋ค.") |
| if not doc_list: |
| st.error("์ฒ๋ฆฌ ๊ฐ๋ฅํ ๋ฌธ์๋ฅผ ์ฐพ์ง ๋ชปํ์ต๋๋ค.") |
| st.stop() |
|
|
| text_chunks = get_text_chunks(doc_list) |
| vectorstore = get_vectorstore(text_chunks) |
| st.session_state.conversation = get_conversation_chain(vectorstore) |
|
|
|
|
if __name__ == '__main__':
    main()
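
# To run (assuming this module is saved as app.py):
#   streamlit run app.py
# GROQ_API_KEY is read from the environment or a .env file via load_dotenv().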