Decim@97 committed on
Commit
04e75ed
·
1 Parent(s): d32f192

Knowbot first commit

Browse files
Files changed (15) hide show
  1. .env.example +4 -0
  2. .gitignore +126 -0
  3. README.md +34 -0
  4. app.py +14 -0
  5. extract_text.py +66 -0
  6. prompt.py +24 -0
  7. requirements.txt +0 -0
  8. store.py +6 -0
  9. style.py +50 -0
  10. ui/__init_.py +0 -0
  11. ui/chat_handler.py +148 -0
  12. ui/gradio.py +52 -0
  13. utils/__init__.py +0 -0
  14. utils/central_logging.py +51 -0
  15. whisper_singleton.py +47 -0
.env.example ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ HF_TOKEN=
2
+ OPENAI_API_KEY=
3
+ ALPHAVANTAGE_API_KEY=
4
+ PERPLEXITY_API_KEY=
.gitignore ADDED
@@ -0,0 +1,126 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # ===============================
2
+ # Python
3
+ # ===============================
4
+ __pycache__/
5
+ *.py[cod]
6
+ *.pyo
7
+ *.pyd
8
+ *.so
9
+ *.egg-info/
10
+ .eggs/
11
+ dist/
12
+ build/
13
+
14
+ # Virtual environments
15
+ .env
16
+ .venv
17
+ venv/
18
+ env/
19
+ myenv/
20
+ ENV/
21
+
22
+ # ===============================
23
+ # Environment & Secrets
24
+ # ===============================
25
+ .env.local
26
+ .env.*.local
27
+ .env.production
28
+ .env.development
29
+ .env.test
30
+ *.key
31
+ *.pem
32
+
33
+ # API keys / credentials
34
+ secrets/
35
+ credentials/
36
+ config/secrets.yaml
37
+ config/secrets.json
38
+
39
+ # ===============================
40
+ # Jupyter / Data Science
41
+ # ===============================
42
+ .ipynb_checkpoints/
43
+ *.ipynb
44
+
45
+ # ===============================
46
+ # ML / AI Artifacts
47
+ # ===============================
48
+ models/
49
+ checkpoints/
50
+ weights/
51
+ *.pt
52
+ *.pth
53
+ *.onnx
54
+ *.joblib
55
+ *.pkl
56
+
57
+ # Vector stores / RAG indexes
58
+ faiss_index/
59
+ chroma/
60
+ vectorstore/
61
+ embeddings/
62
+
63
+ # ===============================
64
+ # Logs & Runtime Files
65
+ # ===============================
66
+ logs/
67
+ *.log
68
+ *.out
69
+ *.err
70
+
71
+ # ===============================
72
+ # Gradio / FastAPI
73
+ # ===============================
74
+ gradio_cached_examples/
75
+ .gradio/
76
+ tmp/
77
+ uploads/
78
+
79
+ # ===============================
80
+ # Cache / Temp
81
+ # ===============================
82
+ .cache/
83
+ .mypy_cache/
84
+ .pytest_cache/
85
+ ruff_cache/
86
+ coverage/
87
+ htmlcov/
88
+
89
+ # ===============================
90
+ # OS / Editor
91
+ # ===============================
92
+ .DS_Store
93
+ Thumbs.db
94
+ .idea/
95
+ .vscode/
96
+ *.swp
97
+ *.swo
98
+
99
+ # ===============================
100
+ # Docker
101
+ # ===============================
102
+ docker-data/
103
+ *.tar
104
+
105
+ # ===============================
106
+ # Deployment
107
+ # ===============================
108
+ *.local
109
+ *.tfstate
110
+ *.tfstate.backup
111
+ .envrc
112
+
113
+ # ===============================
114
+ # Reports / Generated Content
115
+ # ===============================
116
+ reports/
117
+ outputs/
118
+ generated_images/
119
+ charts/
120
+ visualizations/
121
+
122
+ # ===============================
123
+ # Misc
124
+ # ===============================
125
+ *.bak
126
+ *.tmp
README.md CHANGED
@@ -11,3 +11,37 @@ short_description: ' Designed to be an intelligent assistant '
11
  ---
12
 
13
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
11
  ---
12
 
13
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
14
+
15
+
16
+ # 🤖 KnowBot AI — Voice Transcription with Whisper (Gradio + OpenAI)
17
+
18
+ KnowBot AI is a simple **voice-to-text transcription app** built with **Gradio** and **OpenAI Whisper API (`whisper-1`)**.
19
+ It allows users to record their voice using a microphone and instantly get the transcription output.
20
+
21
+ ---
22
+
23
+ ## 🚀 Features
24
+
25
+ - 🎤 Record voice directly from the browser (microphone input)
26
+ - 🧠 Transcribe speech using **OpenAI Whisper (`whisper-1`)**
27
+ - 🌍 Supports accents and multiple languages
28
+ - 🖥️ Clean and simple Gradio interface
29
+
30
+ ---
31
+
32
+ ## 🛠️ Tech Stack
33
+
34
+ - Python 3.9+
35
+ - Gradio
36
+ - OpenAI API (Whisper-1)
37
+ - Whisper
38
+
39
+ ---
40
+
41
+ ## 📂 Project Structure
42
+
43
+ ```bash
44
+ KnowBotAI/
45
+ │── app.py
46
+ │── requirements.txt
47
+ │── README.md
app.py ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from ui.gradio import launch_ui
from whisper_singleton import get_embedding,get_whisper
from utils.central_logging import setup_logging


# Configure root logging once at import time so every module logs uniformly.
setup_logging()

def warmup():
    """Eagerly initialize the Whisper model and the OpenAI embedding client
    so the first user request does not pay the model-load latency."""
    get_whisper()
    get_embedding()

if __name__ == "__main__":
    warmup()
    launch_ui()
extract_text.py ADDED
@@ -0,0 +1,66 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from pypdf import PdfReader
2
+ from langchain_text_splitters import RecursiveCharacterTextSplitter
3
+ from langchain_core.documents import Document
4
+ from langchain_openai import OpenAIEmbeddings
5
+ from langchain_chroma import Chroma
6
+ import re
7
+ import os
8
+
9
def extract_text_from_pdf(file_path: str) -> str:
    """Extract and concatenate the text of every page in a PDF.

    Args:
        file_path: Path to the PDF file to read.

    Returns:
        The concatenated text of all pages; pages with no extractable
        text contribute an empty string.
    """
    reader = PdfReader(file_path)
    # join() builds the result in one pass instead of quadratic `+=`
    # string concatenation across pages.
    return "".join(page.extract_text() or "" for page in reader.pages)
15
+
16
def pdf_to_documents(file_path:str,database_name:str,collection_name:str,embeddings:OpenAIEmbeddings,chunk_size=1000,chunk_overlap=200,metadata:dict=None):
    """Extract a PDF's text, chunk it, and persist the chunks in Chroma.

    Args:
        file_path: Path of the PDF to ingest.
        database_name: Chroma persist directory.
        collection_name: Target collection inside the store.
        embeddings: Embedding function used to vectorize the chunks.
        chunk_size: Maximum characters per chunk.
        chunk_overlap: Overlap (characters) between consecutive chunks.
        metadata: Optional base metadata copied onto every chunk.

    Returns:
        ``(docs, vectorstore)`` on success, or a bare ``[]`` when the PDF
        yields no text. NOTE(review): the two return shapes differ —
        callers that unpack two values will fail on an empty PDF; confirm
        this is intended.
    """
    text = extract_text_from_pdf(file_path)
    # Keep only a basic printable character set. NOTE(review): this also
    # strips accented and non-Latin characters — confirm acceptable.
    text = re.sub(r"[^a-zA-Z0-9.,!?;:'\"()\s]", "", text)
    if not text.strip():
        return []

    splitter = RecursiveCharacterTextSplitter(
        chunk_size=chunk_size,
        chunk_overlap=chunk_overlap)

    chunks = splitter.split_text(text)
    docs = []
    for i,chunk in enumerate(chunks):
        # Copy the caller's metadata so every Document gets its own dict.
        meta = metadata.copy() if metadata else {}
        meta.update({"chunk":i})
        docs.append(Document(page_content=chunk, metadata=meta))

    # Drop any stale collection before re-ingesting the new chunks.
    if os.path.exists(database_name):
        Chroma(persist_directory=database_name, embedding_function=embeddings,collection_name=collection_name).delete_collection()

    vectorstore = Chroma.from_documents(documents=docs, embedding=embeddings, persist_directory=database_name,collection_name=collection_name)

    return docs,vectorstore
40
+
41
+
42
+
43
def store_data(text:str,database_name:str,collection_name:str,embeddings:OpenAIEmbeddings):
    """Split raw text into chunks and persist them in a Chroma collection.

    Any existing collection under ``database_name`` is dropped first, so
    the store always reflects only the latest upload.

    Args:
        text: Raw text to index.
        database_name: Chroma persist directory.
        collection_name: Target collection inside the store.
        embeddings: Embedding function used to vectorize the chunks.

    Returns:
        The newly created Chroma vector store.
    """
    splitter = RecursiveCharacterTextSplitter(
        chunk_size=1000,
        chunk_overlap=0,
        separators=[" ", ",", "\n"],
    )
    documents = [Document(page_content=piece) for piece in splitter.split_text(text)]

    # Remove a stale copy of the collection before re-ingesting.
    if os.path.exists(database_name):
        stale_store = Chroma(
            persist_directory=database_name,
            embedding_function=embeddings,
            collection_name=collection_name,
        )
        stale_store.delete_collection()

    return Chroma.from_documents(
        documents=documents,
        embedding=embeddings,
        persist_directory=database_name,
        collection_name=collection_name,
    )
65
+
66
+
prompt.py ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from langchain_core.prompts import PromptTemplate
2
+
3
def get_system_prompt():
    """Return the RAG system-prompt template.

    The template carries three placeholders to be filled via ``str.format``
    or ``PromptTemplate``: ``{history}``, ``{context}`` and
    ``{user_message}``.
    """
    # BUG FIX: the fallback sentence previously had an unbalanced double
    # quote (opened before "I'm sorry" but never closed), which could
    # confuse the model about where the canned reply ends.
    return """
    You are a helpful assistant. Only answer questions based on the context provided.
    Do not make assumptions. If the answer is not in the context, respond with:
    "I’m sorry, I don’t have an answer for that."

    Conversation history:
    {history}

    Relevant context from documents:
    {context}

    User's Message:
    {user_message}

    Answer:
    """
20
+
21
def get_prompt():
    """Build a PromptTemplate wired to the shared system prompt."""
    return PromptTemplate(
        input_variables=["history", "user_message", "context"],
        template=get_system_prompt(),
    )
requirements.txt ADDED
Binary file (5.44 kB). View file
 
store.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ from langchain_chroma import Chroma
2
+ from langchain_text_splitters import RecursiveCharacterTextSplitter, CharacterTextSplitter
3
+ from langchain_openai import OpenAIEmbeddings
4
+ from langchain_core.documents import Document
5
+ import os
6
+
style.py ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
def get_gradio_style():
    """Return the custom CSS injected into the Gradio app (dark theme).

    NOTE(review): the ``#component-279`` id and the ``svelte-*`` class
    names are auto-generated by Gradio and change between Gradio
    versions/layouts — these selectors are fragile and should be
    re-verified after any Gradio upgrade.
    """
    # Returned verbatim; Gradio expects a plain CSS string.
    return """
    body {
        background-color: #1e1e1e;
        color: white;
    }
    .gradio-container {
        background-color: #1e1e1e;
    }
    .gr-chat-message.user, .gr-chat-message.assistant {
        background-color: #2b2b2b;
        color: white;
        border-radius: 8px;
        padding: 5px 10px;
        margin: 5px 0;
    }
    .gr-button {
        background-color: #444444;
        color: white;
    }
    .gr-textbox textarea {
        background-color: #2b2b2b;
        color: white;
    }
    span.md h2{
        color:white;
    }

    #component-279{
        height: 150px;
    }

    span.svelte-7ddecg p{
        color:white;
    }

    span.chatbot p{
        color: black;
        font-weight: bold;
        font-style: italic;
        font-family: "Arial", sans-serif;
    }

    textarea.svelte-1ae7ssi{
        background: whitesmoke;
        font-weight: bold;
    }

    """
ui/__init_.py ADDED
File without changes
ui/chat_handler.py ADDED
@@ -0,0 +1,148 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from utils.central_logging import setup_logging,get_logger
3
+ import textwrap
4
+ from langchain_openai import OpenAI
5
+ from langchain_chroma import Chroma
6
+ #from langchain_community.document_loaders import SeleniumURLLoader
7
+ from dotenv import load_dotenv
8
+ import os
9
+ import openai
10
+
11
+
12
+ from langchain_openai import ChatOpenAI
13
+ from langchain_core.runnables import RunnableLambda
14
+ import chromadb
15
+
16
+ import gradio as gr
17
+ import time
18
+ import asyncio
19
+ import nest_asyncio
20
+ import threading
21
+ import re
22
+ from openai import OpenAI
23
+ #import streamlit as st
24
+
25
+ from whisper_singleton import get_embedding,save_file,transcribe_content
26
+ from extract_text import pdf_to_documents,store_data
27
+ from prompt import get_prompt,get_system_prompt
28
+
29
+
30
+ load_dotenv("./.env")
31
+
32
+ setup_logging()
33
+ logger = get_logger("chat")
34
+
35
+
36
+ _embedding = None
37
+ _retriever = None
38
+ _vectore_store = None
39
+
40
+ openai_api_key = os.getenv("OPENAI_API_KEY")
41
+
42
+ if openai_api_key:
43
+ logger.info("Open ai api key has been set")
44
+ else:
45
+ logger.error("No open ai api key has been found")
46
+
47
+
48
+
49
+
50
+ try:
51
+ llm_openai = ChatOpenAI(model='gpt-3.5-turbo',temperature=0)
52
+ client = OpenAI()
53
+ logger.info("Clients has been initialized")
54
+ except Exception as e:
55
+ logger.exception(f"An exception occured: {e}")
56
+
57
+
58
+
59
def handle_upload(file_path):
    """Ingest an uploaded PDF/MP3/MP4 file for retrieval-augmented chat.

    Extracts (PDF) or transcribes (MP3/MP4) the file's text, (re)builds the
    vector store, and refreshes the module-level retriever used by
    ``stream_response``.

    Args:
        file_path: Local path of the uploaded file (Gradio ``filepath``).

    Returns:
        ``(status_message, text_content)``: a human-readable status string
        and the extracted/transcribed text (empty string on failure).
    """
    # BUG FIX: _vectore_store was missing from the global declaration, so
    # assignments below created a function local — the module global was
    # never updated, and an unsupported or failed upload crashed with
    # UnboundLocalError at the as_retriever() call.
    global _embedding, _retriever, _vectore_store

    _embedding = get_embedding()
    text_content = ""
    status_message = ""
    try:
        lowered = file_path.lower()
        if lowered.endswith(".pdf"):
            collection_name = "pdffiles"
            pdf_docs, _vectore_store = pdf_to_documents(file_path, "transcribe_db", collection_name, _embedding)
            text_content = "\n\n".join(doc.page_content for doc in pdf_docs)
            status_message = "📄 PDF file uploaded — extraction implemented."
            logger.info(status_message)
        elif lowered.endswith(".mp3") or lowered.endswith(".mp4"):
            logger.debug(f"path:{file_path}")
            if lowered.endswith(".mp3"):
                collection_name = "audios"
                status_message = "🎧 MP3 uploaded — transcription implemented."
            else:
                collection_name = "videos"
                status_message = "🎬 MP4 uploaded — video transcription implemented."
            logger.info(status_message)
            text_content = transcribe_content(file_path)
            _vectore_store = store_data(text_content, "transcribe_db", collection_name, _embedding)
        else:
            status_message = "Invalid file format"
    except Exception as e:
        status_message = f"❌ Error processing file: {e}"
        logger.exception(status_message)
    # Only refresh the retriever when a store was actually (re)built.
    if _vectore_store is not None:
        _retriever = _vectore_store.as_retriever()
    return status_message, text_content
96
+
97
+
98
+
99
def stream_response(user_input,history):
    """Stream an assistant reply token-by-token for the Gradio chat.

    Args:
        user_input: The user's latest message.
        history: Chat history as a list of ``{"role", "content"}`` dicts
            (Gradio state); mutated in place.

    Yields:
        ``(history, history, "")`` after each streamed token — the chatbot
        display, the updated state, and an empty string that clears the
        input textbox.
    """

    history = history or []

    history.append({"role": "user", "content": user_input})
    # Placeholder assistant turn; its content is filled in as tokens stream.
    history.append({"role": "assistant", "content": ""})

    # Pull supporting context from the vector store, if a file was ingested.
    context = ""
    if _retriever is not None:
        docs = _retriever.invoke(user_input)
        context = "\n\n".join([d.page_content for d in docs])

    formatted_history = "\n".join(
        f"{m['role'].capitalize()}: {m['content']}"
        for m in history
    )

    # NOTE(review): the freshly appended user turn is part of
    # formatted_history, so the prompt repeats the user message both in
    # {history} and {user_message} — confirm this duplication is intended.
    system_prompt = get_system_prompt().format(
        history=formatted_history,
        context=context,
        user_message=user_input
    )

    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_input},
    ]

    partial_reply = ""

    stream = client.chat.completions.create(
        model="gpt-4o-mini",
        messages=messages,
        stream=True,
        temperature = 0
    )

    # Accumulate the reply delta-by-delta, re-yielding the whole history so
    # the chatbot widget re-renders with the partial text.
    for event in stream:
        delta = event.choices[0].delta
        if delta and delta.content:
            token = delta.content
            partial_reply += token
            history[-1]["content"] = partial_reply
            yield history, history, ""

    # Final yield ensures the UI shows the complete reply even when the
    # last stream chunk carried no content.
    history[-1]["content"] = partial_reply
    yield history, history, ""
148
+
ui/gradio.py ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from style import get_gradio_style
2
+ from .chat_handler import stream_response,handle_upload
3
+ import gradio as gr
4
+
5
+
6
+
7
+
8
+
9
def show_button(file):
    """Clear the status/text panes and reveal the process button once a
    file has been selected (hide it again when the selection is cleared)."""
    cleared_status = ""
    cleared_text = ""
    button_visibility = gr.update(visible=bool(file))
    return cleared_status, cleared_text, button_visibility
13
+
14
+
15
+
16
def launch_ui():
    """Build and launch the KnowBot Gradio UI.

    Layout: a left column for file upload/processing and extracted text, a
    right column with the chatbot, message box and clear button. Blocks
    until the Gradio server exits (``debug=True``).
    """
    # BUG FIX: css expects the stylesheet string (or a file path), not the
    # function object — get_gradio_style must be called.
    with gr.Blocks(css=get_gradio_style()) as demo:
        gr.Markdown("## 🤖 💬 KnowBot AI — Document-Aware Chat Assistant")

        with gr.Row():

            with gr.Column(scale=1):
                upload_file = gr.File(
                    label="Upload a PDF, MP4, or MP3 file",
                    file_types=[".pdf", ".mp4", ".mp3"],
                    type="filepath"
                )
                upload_button = gr.Button("Upload and Process", visible=False)
                upload_status = gr.Markdown()
                pdf_text_area = gr.Textbox(
                    label="PDF Text Content",
                    lines=15,
                    interactive=False,
                    placeholder="Extracted text will appear here...")
                # Selecting a file clears the panes and reveals the button.
                upload_file.change(fn=show_button, inputs=upload_file,
                                   outputs=[upload_status, pdf_text_area, upload_button])

            with gr.Column(scale=3):
                chatbot = gr.Chatbot(height=400, show_label=False, render_markdown=True)
                #audio_input = gr.Audio(label="🎤 Record your message",type="filepath",sources=["microphone"],interactive=True)
                msg = gr.Textbox(label="Your message")
                clear = gr.Button("Clear Conversation")

                state = gr.State([])
                # stream_response is a generator: each yield updates the
                # chatbot, the state, and clears the textbox.
                msg.submit(stream_response, [msg, state], [chatbot, state, msg])
                clear.click(lambda: ([], [], ""), None, [chatbot, state, msg])
                upload_button.click(handle_upload, inputs=upload_file,
                                    outputs=[upload_status, pdf_text_area])
                #audio_input.change(stream_response,inputs=[msg, state],outputs=[chatbot, state, msg])
        demo.queue(default_concurrency_limit=64)
        demo.launch(debug=True, share=False)
51
+
52
+
utils/__init__.py ADDED
File without changes
utils/central_logging.py ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import logging
2
+ import logging.handlers
3
+ from pathlib import Path
4
+
5
+ LOG_DIR = Path("logs")
6
+ LOG_DIR.mkdir(exist_ok=True)
7
+
8
+ LOG_FILE = LOG_DIR / "advisor.log"
9
+
10
+ LOG_FORMAT = (
11
+ "%(asctime)s | %(levelname)s | %(name)s | "
12
+ "%(funcName)s:%(lineno)d | %(message)s"
13
+ )
14
+
15
+ DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
16
+
17
+
18
def setup_logging(log_level=logging.INFO):
    """Configure the root logger with rotating-file and console output.

    Safe to call repeatedly: when handlers are already attached (e.g. on a
    notebook re-run or module reload) only the level is refreshed and no
    duplicate handlers are added.

    Args:
        log_level: Threshold applied to the root logger and both handlers.
    """
    root = logging.getLogger()
    root.setLevel(log_level)

    # Already configured — bail out rather than stacking duplicates.
    if root.handlers:
        return

    fmt = logging.Formatter(LOG_FORMAT, DATE_FORMAT)

    rotating_file = logging.handlers.RotatingFileHandler(
        LOG_FILE,
        maxBytes=10 * 1024 * 1024,  # rotate at 10 MB
        backupCount=5,
        encoding="utf-8",
    )
    console = logging.StreamHandler()

    for handler in (rotating_file, console):
        handler.setFormatter(fmt)
        handler.setLevel(log_level)
        root.addHandler(handler)
47
+
48
+
49
def get_logger(name: str) -> logging.Logger:
    """Fetch the logger registered under *name*.

    Thin wrapper around :func:`logging.getLogger`, so modules depend on the
    central logging helper instead of the logging module directly.
    """
    named_logger = logging.getLogger(name)
    return named_logger
whisper_singleton.py ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from utils.central_logging import get_logger
2
+ from langchain_openai import OpenAIEmbeddings
3
+ from pathlib import Path
4
+ import whisper
5
+ import threading
6
+ import os
7
+
8
+
9
+ logger = get_logger("whisper")
10
+
11
+ _whisper_model = None
12
+ _lock = threading.Lock()
13
+ _embedding = None
14
+ _embedding_lock = threading.Lock()
15
+
16
def get_whisper():
    """Return the process-wide Whisper model, loading it on first use.

    Uses double-checked locking so concurrent callers share one model
    load: the outer check avoids taking the lock on the hot path, the
    inner check guards against a racing loader.
    """
    global _whisper_model

    if _whisper_model is None:
        with _lock:
            # Re-check inside the lock: another thread may have loaded
            # the model while this one was waiting.
            if _whisper_model is None:
                _whisper_model = whisper.load_model("base")
                logger.info("Whisper model has been loaded")
    return _whisper_model
25
+
26
def get_embedding():
    """Return the shared OpenAI embedding client, creating it on first use.

    Same double-checked-locking singleton pattern as ``get_whisper`` so
    concurrent callers never construct more than one client.
    """
    global _embedding

    if _embedding is None:
        with _embedding_lock:
            # Re-check inside the lock to avoid a duplicate construction
            # by a racing thread.
            if _embedding is None:
                _embedding = OpenAIEmbeddings(model="text-embedding-ada-002")
                logger.info("Openai embedding has been initialized")
    return _embedding
35
+
36
+
37
def transcribe_content(url_path: str) -> str:
    """Transcribe an audio/video file with the shared Whisper model.

    Args:
        url_path: Path to the media file to transcribe.

    Returns:
        The transcribed text.
    """
    # BUG FIX: safe_path was computed but never used — the raw path was
    # passed to Whisper. Pass the resolved POSIX form instead (presumably
    # intended to normalize relative/backslash paths for ffmpeg — confirm).
    safe_path = Path(url_path).resolve().as_posix()
    model = get_whisper()
    result = model.transcribe(safe_path)
    return result["text"]
42
+
43
+
44
def save_file(file_name, result):
    """Write *result* to *file_name*, overwriting any existing content.

    Args:
        file_name: Destination path.
        result: Text to write (e.g. a transcript).
    """
    # UTF-8 is set explicitly: transcripts may contain non-ASCII characters
    # and the platform default encoding is not guaranteed to handle them.
    with open(file_name, 'w', encoding="utf-8") as file:
        file.write(result)
47
+