hodfa840 committed on
Commit
42d74a1
·
1 Parent(s): f448e72

fix: resolve HfFolder import error by pinning dependencies and update README links

Browse files
Files changed (4) hide show
  1. README.md +3 -2
  2. deploy_hf.py +18 -0
  3. modules/llm.py +2 -4
  4. requirements.txt +2 -2
README.md CHANGED
@@ -18,11 +18,12 @@ pinned: false
18
  [![CI](https://github.com/hodfa840/-RetailMind-Self-Healing-LLM-for-Store-Intelligence/actions/workflows/ci.yml/badge.svg)](https://github.com/hodfa840/-RetailMind-Self-Healing-LLM-for-Store-Intelligence/actions)
19
  [![Python 3.10+](https://img.shields.io/badge/python-3.10%2B-blue?logo=python&logoColor=white)](https://python.org)
20
  [![Gradio](https://img.shields.io/badge/Gradio-4.0%2B-orange?logo=gradio)](https://gradio.app)
21
- [![License: MIT](https://img.shields.io/badge/License-MIT-green.svg)](LICENSE)
 
22
 
23
  **An autonomous e-commerce AI that detects semantic drift in user intent and self-heals its own behavior in real time — no human in the loop.**
24
 
25
- [Live Demo](https://huggingface.co/spaces/Hodfa71/RetailMind) · [Architecture](#-architecture) · [How It Works](#-how-the-self-healing-loop-works) · [Technical Decisions](#-technical-decisions)
26
 
27
  </div>
28
 
 
18
  [![CI](https://github.com/hodfa840/-RetailMind-Self-Healing-LLM-for-Store-Intelligence/actions/workflows/ci.yml/badge.svg)](https://github.com/hodfa840/-RetailMind-Self-Healing-LLM-for-Store-Intelligence/actions)
19
  [![Python 3.10+](https://img.shields.io/badge/python-3.10%2B-blue?logo=python&logoColor=white)](https://python.org)
20
  [![Gradio](https://img.shields.io/badge/Gradio-4.0%2B-orange?logo=gradio)](https://gradio.app)
21
+ [![Hugging Face Space](https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Space-blue)](https://huggingface.co/spaces/Hodfa71/RetailMind)
22
+ [![Live Demo](https://img.shields.io/badge/Demo-Direct%20Link-green)](https://hodfa71-retailmind.hf.space/)
23
 
24
  **An autonomous e-commerce AI that detects semantic drift in user intent and self-heals its own behavior in real time — no human in the loop.**
25
 
26
+ [Hugging Face Space](https://huggingface.co/spaces/Hodfa71/RetailMind) · [Direct App URL](https://hodfa71-retailmind.hf.space/) · [Architecture](#-architecture) · [How It Works](#-how-the-self-healing-loop-works)
27
 
28
  </div>
29
 
deploy_hf.py ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Deploy the RetailMind project to the Hugging Face Space Hodfa71/RetailMind.

Reads an access token from the HF_TOKEN environment variable (preferred) or
from a local ``hf_token`` file, creates the Space if it does not already
exist, and uploads the project's source files.
"""
import os

from huggingface_hub import HfApi

# Target Space; defined once so the create/upload/report steps stay in sync.
REPO_ID = "Hodfa71/RetailMind"


def _read_token() -> str:
    """Return the HF token from HF_TOKEN, falling back to the ``hf_token`` file.

    Raises:
        SystemExit: if neither source provides a token.
    """
    token = os.environ.get("HF_TOKEN")
    if token:
        return token.strip()
    try:
        # `with` ensures the handle is closed (the original leaked it).
        with open("hf_token", encoding="utf-8") as fh:
            return fh.read().strip()
    except FileNotFoundError:
        raise SystemExit(
            "No Hugging Face token found: set HF_TOKEN or create an 'hf_token' file."
        )


def main() -> None:
    """Create the Space (idempotent) and upload the project files."""
    api = HfApi(token=_read_token())

    print(f"Creating Space: {REPO_ID}")
    # exist_ok=True makes re-runs safe; sdk must match the Space's app type.
    api.create_repo(repo_id=REPO_ID, repo_type="space", space_sdk="gradio", exist_ok=True)

    print("Uploading files...")
    # allow_patterns whitelists sources only; the extension-less `hf_token`
    # file does not match any pattern and is therefore never uploaded.
    api.upload_folder(
        folder_path=".",
        repo_id=REPO_ID,
        repo_type="space",
        allow_patterns=["*.py", "*.txt", "*.md", "modules/**/*.py", "tests/**/*.py"],
    )
    print(f"Deployed to https://huggingface.co/spaces/{REPO_ID}")


# Guard so importing this module never triggers a network deploy.
if __name__ == "__main__":
    main()
modules/llm.py CHANGED
@@ -81,10 +81,8 @@ def generate_response(
81
  gen = _get_pipeline()
82
  result = gen(
83
  messages,
84
- max_new_tokens=150,
85
- temperature=0.1,
86
- do_sample=True,
87
- top_p=0.85,
88
  return_full_text=False,
89
  )
90
  generated = result[0]["generated_text"]
 
81
  gen = _get_pipeline()
82
  result = gen(
83
  messages,
84
+ max_new_tokens=120,
85
+ do_sample=False,
 
 
86
  return_full_text=False,
87
  )
88
  generated = result[0]["generated_text"]
requirements.txt CHANGED
@@ -1,8 +1,8 @@
1
- gradio>=4.0.0
2
  transformers
3
  torch
4
  sentence-transformers
5
- huggingface_hub
6
  python-dotenv
7
  plotly
8
  numpy
 
1
+ gradio==5.1.0
2
  transformers
3
  torch
4
  sentence-transformers
5
+ huggingface_hub==0.26.2
6
  python-dotenv
7
  plotly
8
  numpy