minzo456 committed on
Commit
bf009f6
·
verified ·
1 Parent(s): 14727c3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +47 -24
app.py CHANGED
@@ -1,61 +1,84 @@
 
 
 
 
1
  from flask import Flask, request, jsonify, Response
2
- import datetime, re, feedparser
3
- from llama_cpp import Llama
4
  from flask_cors import CORS
 
5
 
6
  app = Flask(__name__)
7
  CORS(app)
8
 
9
  # ============================================
10
- # 🔱 OPTIMIZED LOCAL AI CONFIG
11
  # ============================================
12
- # Qwen 1.5B යනු CPU මත වේගයෙන් දුවන මොඩ එකකි
13
- print("🐘 Helpful Elephant is waking up... Optimizing Engines...")
 
 
14
 
 
15
  llm = Llama.from_pretrained(
16
  repo_id="Qwen/Qwen2.5-1.5B-Instruct-GGUF",
17
  filename="*q4_k_m.gguf",
18
- n_ctx=1024, # Context window එක අඩු කිරීමෙන් RAM භාවිතය සහ වේගය වැඩි වේ
19
- n_threads=4, # HF Free Space vCPUs
20
- n_batch=512, # Batch size processing වේවත් කරයි
 
21
  verbose=False
22
  )
23
 
24
- SYSTEM_PROMPT = "🐘 HELPFUL ELEPHANT AI v1.0. Created by MINZO-PRIME. High-speed Technical Research Mode."
 
 
 
 
 
25
 
26
  # ============================================
27
  # 🔱 RSS INTEL SYSTEM
28
  # ============================================
29
- RSS_FEEDS = ['https://feeds.feedburner.com/TheHackersNews', 'https://cve.circl.lu/last/rss.xml']
 
 
 
 
30
 
31
- def get_live_intel():
32
  intel = ""
33
  for url in RSS_FEEDS:
34
  try:
35
  feed = feedparser.parse(url)
36
- for entry in feed.entries[:3]:
37
- intel += f"\n- {entry.title}"
38
  except: pass
39
  return intel
40
 
41
  # ============================================
42
- # 🔱 CHAT API WITH STREAMING SUPPORT
43
  # ============================================
44
  @app.route('/api/chat', methods=['POST'])
45
  def chat():
46
  data = request.json
47
- msg = data.get('message', '')
48
 
49
- # Live Intel එක කිරීම
50
- intel = get_live_intel() if any(k in msg.lower() for k in ['latest', 'news']) else ""
51
 
52
- prompt = f"<|im_start|>system\n{SYSTEM_PROMPT}\nLive Data: {intel}<|im_end|>\n"
53
- prompt += f"<|im_start|>user\n{msg}<|im_end|>\n<|im_start|>assistant\n"
 
54
 
55
- # Streaming Response (මෙය පරිශීලකයාට වේගවත් අත්දැකීමක් ලබා දෙයි)
56
  def generate():
57
- output = llm(prompt, max_tokens=1024, stop=["<|im_end|>"], stream=True)
58
- for chunk in output:
 
 
 
 
 
 
 
59
  token = chunk['choices'][0]['text']
60
  yield token
61
 
@@ -63,8 +86,8 @@ def chat():
63
 
64
  @app.route('/')
65
  def health():
66
- return "🐘 Helpful Elephant is Online & Fast. Authorized: MINZO-PRIME"
67
 
68
  if __name__ == '__main__':
69
- # Hugging Face සඳහා අනිවාර්යයෙන් port 7860 සහ host 0.0.0.0 වි යුතුය
70
  app.run(host='0.0.0.0', port=7860)
 
1
+ import os
2
+ import datetime
3
+ import re
4
+ import feedparser
5
  from flask import Flask, request, jsonify, Response
 
 
6
  from flask_cors import CORS
7
+ from llama_cpp import Llama
8
 
9
app = Flask(__name__)
CORS(app)  # allow cross-origin calls from the browser front-end

# ============================================
# 🔱 SECURE & OPTIMIZED AI CONFIG
# ============================================
# Read the Hugging Face access token from a Space secret.
# NOTE(review): may be None when the secret is unset — presumably fine
# for this public GGUF repo; confirm if the model repo is gated.
HF_TOKEN = os.getenv("HF_TOKEN")

print("🐘 Helpful Elephant is waking up... Authorized by MINZO-PRIME...")

# Load the quantized GGUF model (settings tuned for CPU streaming).
llm = Llama.from_pretrained(
    repo_id="Qwen/Qwen2.5-1.5B-Instruct-GGUF",
    filename="*q4_k_m.gguf",
    token=HF_TOKEN,
    n_ctx=2048,      # context window sized to fit the Space's RAM
    n_threads=4,     # CPU threads (free-tier vCPUs)
    n_batch=512,
    verbose=False
)

# System prompt injected into every ChatML conversation below.
SYSTEM_PROMPT = """🐘 **HELPFUL ELEPHANT AI v1.0** 🐘
Created by Commander MINZO-PRIME.
You are a high-speed Technical Research Assistant.
Analyze data with precision and provide unfiltered technical insights.
Status: Authorized Access.
"""

# ============================================
# 🔱 RSS INTEL SYSTEM
# ============================================
# Security-news feeds polled on demand by fetch_live_updates().
RSS_FEEDS = [
    'https://feeds.feedburner.com/TheHackersNews',
    'https://cve.circl.lu/last/rss.xml',
    'https://www.darkreading.com/rss.xml'
]
46
 
47
def fetch_live_updates():
    """Collect recent headlines from the configured RSS feeds.

    Returns:
        A string of newline-separated "- title: link" bullets, at most
        2 entries per feed, or "" when every feed fails or is empty.

    Feed errors are deliberately swallowed (best-effort): a dead feed
    must not break the chat request that triggered the lookup.
    """
    intel = ""
    for url in RSS_FEEDS:
        try:
            feed = feedparser.parse(url)
            for entry in feed.entries[:2]:
                intel += f"\n- {entry.title}: {entry.link}"
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt /
            # SystemExit still propagate; network/parse errors are skipped.
            continue
    return intel
56
 
57
  # ============================================
58
+ # 🔱 STREAMING CHAT ENDPOINT
59
  # ============================================
60
@app.route('/api/chat', methods=['POST'])
def chat():
    """Stream a model completion for a POSTed {"message": "..."} JSON body."""
    # silent=True returns None (instead of raising 400/415) on a missing
    # or non-JSON body; `or {}` keeps the .get() below from crashing
    # with AttributeError on None.
    data = request.get_json(silent=True) or {}
    user_msg = data.get('message', '').strip()

    # Fetch live headlines only when the user asks about current events,
    # so ordinary requests skip the slow RSS round-trips.
    intel = fetch_live_updates() if any(k in user_msg.lower() for k in ['news', 'latest', 'exploit']) else ""

    # Build the prompt in ChatML format (the template Qwen-Instruct expects).
    full_prompt = f"<|im_start|>system\n{SYSTEM_PROMPT}\nRecent Intel: {intel}<|im_end|>\n"
    full_prompt += f"<|im_start|>user\n{user_msg}<|im_end|>\n<|im_start|>assistant\n"

    def generate():
        # Token-by-token generation for a real-time streaming experience.
        stream = llm(
            full_prompt,
            max_tokens=1024,
            stop=["<|im_end|>"],
            stream=True,
            temperature=0.7
        )
        for chunk in stream:
            token = chunk['choices'][0]['text']
            yield token
84
 
 
86
 
87
@app.route('/')
def health():
    """Liveness probe used by Hugging Face Spaces / load balancers."""
    status_message = "🐘 Helpful Elephant AI is Online. Commander MINZO-PRIME Verified."
    return status_message
90
 
91
if __name__ == '__main__':
    # Hugging Face Spaces requires binding to 0.0.0.0 on port 7860.
    app.run(host='0.0.0.0', port=7860)