MB-IDK committed on
Commit
0dd82d9
Β·
verified Β·
1 Parent(s): 8cb592a

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +376 -0
app.py ADDED
@@ -0,0 +1,376 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ API OpenAI-compatible wrappant aifreeforever.com
3
+ Endpoints :
4
+ GET /v1/models
5
+ POST /v1/chat/completions (stream=false & stream=true)
6
+ """
7
+
8
+ import asyncio, json, time, uuid
9
+ from typing import Optional
10
+ from fastapi import FastAPI, HTTPException
11
+ from fastapi.responses import StreamingResponse
12
+ from pydantic import BaseModel
13
+ from playwright.async_api import async_playwright, Page, TimeoutError as PWTimeout
14
+
15
+ # ════════════════════════════════════════
16
+ # FastAPI
17
+ # ════════════════════════════════════════
18
+ app = FastAPI(title="Free Chat API")
19
+
20
+ # Limite 1 requΓͺte Playwright Γ  la fois (free tier = peu de RAM)
21
+ SEM = asyncio.Semaphore(1)
22
+
23
+ MODEL_NAME = "aifreeforever"
24
+
25
+ # ════════════════════════════════════════
26
+ # Modèles Pydantic (format OpenAI)
27
+ # ════════════════════════════════════════
28
+ class Message(BaseModel):
29
+ role: str
30
+ content: str
31
+
32
+ class ChatRequest(BaseModel):
33
+ model: Optional[str] = MODEL_NAME
34
+ messages: list[Message]
35
+ stream: Optional[bool] = False
36
+ temperature: Optional[float] = None
37
+ max_tokens: Optional[int] = None
38
+
39
+ # ════════════════════════════════════════
40
+ # CONFIG PLAYWRIGHT
41
+ # ════════════════════════════════════════
42
+ URL = "https://aifreeforever.com/tools/free-chatgpt-no-login"
43
+ HEADLESS = True
44
+ MAX_RETRIES = 5
45
+
46
+ SEL_TEXTAREA = 'textarea[placeholder*="Ask anything"]'
47
+ SEL_SEND_BTN = 'button.absolute.right-3.top-4.w-10.h-10'
48
+ SEL_BOT_MSG = '.flex.justify-start .rounded-2xl.shadow-sm.bg-white.border.border-gray-100'
49
+ SEL_BOT_CONTENT = '.markdown-content'
50
+ SEL_ACCEPT_BTN = 'button:has-text("Accept & Continue")'
51
+ SEL_AGE_BTN = 'button:has-text("13 and Over")'
52
+ SEL_COPY_BTN = (
53
+ 'button.text-xs.text-gray-500.flex.items-center.gap-1'
54
+ '.px-2.py-1.rounded-md.transition-colors:has-text("Copy")'
55
+ )
56
+ SEL_ERROR_MSG = 'p.text-red-600:has-text("Failed to send message")'
57
+
58
+ COOKIE_SELECTORS = [
59
+ 'button.unic-agree-all-button',
60
+ 'button:has-text("Accepter et continuer")',
61
+ 'button:has-text("Accepter tout")',
62
+ 'button:has-text("Accept all")',
63
+ 'button:has-text("Accept All")',
64
+ 'button:has-text("I agree")',
65
+ ]
66
+
67
+ AD_SELECTORS = [
68
+ 'button:has(path[fill="#1D1D1B"][opacity="0.7"])',
69
+ 'button:has(path[style*="stroke: rgb(255, 255, 255)"][style*="stroke-width: 6.353"])',
70
+ 'button[aria-label*="close" i]',
71
+ 'button[aria-label*="fermer" i]',
72
+ 'button[title*="close" i]',
73
+ 'button:has-text("Γ—")',
74
+ 'button:has-text("βœ•")',
75
+ 'button:has-text("Close")',
76
+ '[class*="close-btn"]',
77
+ ]
78
+
79
+ # ════════════════════════════════════════
80
+ # HELPERS PLAYWRIGHT (identiques au script)
81
+ # ════════════════════════════════════════
82
+
83
+ async def _try_click(page: Page, selector: str, timeout: int) -> bool:
84
+ try:
85
+ btn = page.locator(selector).first
86
+ await btn.wait_for(state="visible", timeout=timeout)
87
+ await btn.click()
88
+ return True
89
+ except PWTimeout:
90
+ return False
91
+
92
+ async def click_first_visible(page: Page, selectors: list[str], timeout: int = 4000):
93
+ tasks = [_try_click(page, sel, timeout) for sel in selectors]
94
+ results = await asyncio.gather(*tasks, return_exceptions=True)
95
+ return any(r is True for r in results)
96
+
97
+ async def click_if_visible(page: Page, selector: str, timeout: int = 8000) -> bool:
98
+ return await _try_click(page, selector, timeout)
99
+
100
+ async def send_with_retry(page: Page, prompt: str) -> bool:
101
+ for attempt in range(1, MAX_RETRIES + 1):
102
+ if attempt > 1:
103
+ await page.locator(SEL_TEXTAREA).fill(prompt)
104
+ await page.locator(SEL_SEND_BTN).click(timeout=15_000)
105
+ await asyncio.sleep(2)
106
+ if await page.locator(SEL_ERROR_MSG).count() == 0:
107
+ return True
108
+ return False
109
+
110
+ async def try_copy_button(page: Page) -> str | None:
111
+ try:
112
+ msgs = page.locator(SEL_BOT_MSG)
113
+ count = await msgs.count()
114
+ if count == 0:
115
+ return None
116
+ last_msg = None
117
+ for i in range(count - 1, -1, -1):
118
+ msg = msgs.nth(i)
119
+ if await msg.locator(SEL_BOT_CONTENT).count() > 0:
120
+ last_msg = msg
121
+ break
122
+ if last_msg is None:
123
+ last_msg = msgs.nth(count - 1)
124
+ copy_btn = last_msg.locator(SEL_COPY_BTN)
125
+ await copy_btn.wait_for(state="visible", timeout=3000)
126
+ await copy_btn.click()
127
+ await asyncio.sleep(0.8)
128
+ text = await page.evaluate("async () => navigator.clipboard.readText()")
129
+ if text and len(text.strip()) > 20:
130
+ return text.strip()
131
+ except Exception:
132
+ pass
133
+ return None
134
+
135
+ async def scrape_last_bot_message(page: Page) -> str | None:
136
+ try:
137
+ msgs = page.locator(SEL_BOT_MSG)
138
+ count = await msgs.count()
139
+ for i in range(count - 1, -1, -1):
140
+ msg = msgs.nth(i)
141
+ content_el = msg.locator(SEL_BOT_CONTENT)
142
+ if await content_el.count() > 0:
143
+ text = (await content_el.first.inner_text()).strip()
144
+ if len(text) > 20:
145
+ return text
146
+ else:
147
+ text = (await msg.inner_text()).strip()
148
+ if len(text) > 80 and "Accept & Continue" not in text:
149
+ return text
150
+ except Exception:
151
+ pass
152
+ return None
153
+
154
+ async def get_response(page: Page) -> str:
155
+ text = await try_copy_button(page)
156
+ if text:
157
+ return text
158
+ return await scrape_last_bot_message(page) or ""
159
+
160
+ async def wait_for_stream_end(page: Page, timeout_s: int = 120) -> None:
161
+ prev_text = ""
162
+ stable = 0
163
+ elapsed = 0.0
164
+ interval = 0.6
165
+ while elapsed < timeout_s:
166
+ await asyncio.sleep(interval)
167
+ elapsed += interval
168
+ msgs = page.locator(SEL_BOT_MSG)
169
+ count = await msgs.count()
170
+ if count == 0:
171
+ continue
172
+ current = ""
173
+ for i in range(count - 1, -1, -1):
174
+ content_el = msgs.nth(i).locator(SEL_BOT_CONTENT)
175
+ if await content_el.count() > 0:
176
+ t = (await content_el.first.inner_text()).strip()
177
+ if len(t) > 20:
178
+ current = t
179
+ break
180
+ if not current:
181
+ continue
182
+ if current != prev_text:
183
+ stable = 0
184
+ else:
185
+ stable += 1
186
+ if stable >= 3:
187
+ try:
188
+ await msgs.nth(count - 1).locator(SEL_COPY_BTN).wait_for(
189
+ state="visible", timeout=1500
190
+ )
191
+ return
192
+ except PWTimeout:
193
+ stable = 0
194
+ prev_text = current
195
+
196
+ # ════════════════════════════════════════
197
+ # CŒUR : envoie un prompt, récupère la réponse
198
+ # ════════════════════════════════════════
199
+
200
+ async def chat(prompt: str) -> str:
201
+ async with async_playwright() as p:
202
+ browser = await p.chromium.launch(
203
+ headless=HEADLESS,
204
+ args=[
205
+ "--disable-blink-features=AutomationControlled",
206
+ "--disable-extensions", "--disable-default-apps",
207
+ "--no-first-run", "--no-sandbox", "--disable-gpu",
208
+ "--disable-dev-shm-usage",
209
+ "--window-size=1280,720",
210
+ ],
211
+ )
212
+ context = await browser.new_context(
213
+ user_agent=(
214
+ "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
215
+ "AppleWebKit/537.36 (KHTML, like Gecko) "
216
+ "Chrome/124.0.0.0 Safari/537.36"
217
+ ),
218
+ viewport={"width": 1280, "height": 720},
219
+ permissions=["clipboard-read", "clipboard-write"],
220
+ java_script_enabled=True,
221
+ )
222
+ await context.route(
223
+ "**/*",
224
+ lambda route: route.abort()
225
+ if route.request.resource_type in ("image", "media", "font")
226
+ else route.continue_(),
227
+ )
228
+ page = await context.new_page()
229
+
230
+ try:
231
+ await page.goto(URL, wait_until="domcontentloaded", timeout=60_000)
232
+
233
+ await asyncio.gather(
234
+ click_first_visible(page, AD_SELECTORS, timeout=3000),
235
+ click_first_visible(page, COOKIE_SELECTORS, timeout=5000),
236
+ )
237
+
238
+ await page.wait_for_selector(SEL_TEXTAREA, timeout=30_000)
239
+ await page.locator(SEL_TEXTAREA).fill(prompt)
240
+
241
+ if not await send_with_retry(page, prompt):
242
+ return ""
243
+
244
+ await asyncio.gather(
245
+ click_if_visible(page, SEL_AGE_BTN, timeout=8000),
246
+ click_if_visible(page, SEL_ACCEPT_BTN, timeout=8000),
247
+ )
248
+
249
+ await wait_for_stream_end(page, timeout_s=120)
250
+ response = await get_response(page)
251
+ finally:
252
+ await browser.close()
253
+
254
+ return response
255
+
256
+ # ════════════════════════════════════════
257
+ # HELPERS FORMAT OPENAI
258
+ # ════════════════════════════════════════
259
+
260
+ def _make_id():
261
+ return f"chatcmpl-{uuid.uuid4().hex[:29]}"
262
+
263
+ def _completion_response(content: str, model: str) -> dict:
264
+ return {
265
+ "id": _make_id(),
266
+ "object": "chat.completion",
267
+ "created": int(time.time()),
268
+ "model": model,
269
+ "choices": [
270
+ {
271
+ "index": 0,
272
+ "message": {"role": "assistant", "content": content},
273
+ "finish_reason": "stop",
274
+ }
275
+ ],
276
+ "usage": {
277
+ "prompt_tokens": 0,
278
+ "completion_tokens": 0,
279
+ "total_tokens": 0,
280
+ },
281
+ }
282
+
283
+ async def _stream_chunks(content: str, model: str):
284
+ """Génère des SSE au format OpenAI streaming."""
285
+ cid = _make_id()
286
+ created = int(time.time())
287
+
288
+ # Premier chunk (role)
289
+ chunk = {
290
+ "id": cid, "object": "chat.completion.chunk",
291
+ "created": created, "model": model,
292
+ "choices": [{"index": 0, "delta": {"role": "assistant", "content": ""}, "finish_reason": None}],
293
+ }
294
+ yield f"data: {json.dumps(chunk)}\n\n"
295
+
296
+ # Contenu dΓ©coupΓ© mot par mot
297
+ words = content.split(" ")
298
+ for i, word in enumerate(words):
299
+ token = word if i == 0 else f" {word}"
300
+ chunk = {
301
+ "id": cid, "object": "chat.completion.chunk",
302
+ "created": created, "model": model,
303
+ "choices": [{"index": 0, "delta": {"content": token}, "finish_reason": None}],
304
+ }
305
+ yield f"data: {json.dumps(chunk)}\n\n"
306
+ await asyncio.sleep(0.02)
307
+
308
+ # Dernier chunk
309
+ chunk = {
310
+ "id": cid, "object": "chat.completion.chunk",
311
+ "created": created, "model": model,
312
+ "choices": [{"index": 0, "delta": {}, "finish_reason": "stop"}],
313
+ }
314
+ yield f"data: {json.dumps(chunk)}\n\n"
315
+ yield "data: [DONE]\n\n"
316
+
317
+ # ════════════════════════════════════════
318
+ # ENDPOINTS
319
+ # ════════════════════════════════════════
320
+
321
+ @app.get("/")
322
+ async def root():
323
+ return {"status": "ok", "message": "OpenAI-compatible API. Use /v1/chat/completions"}
324
+
325
+ @app.get("/v1/models")
326
+ async def list_models():
327
+ return {
328
+ "object": "list",
329
+ "data": [
330
+ {
331
+ "id": MODEL_NAME,
332
+ "object": "model",
333
+ "created": 1700000000,
334
+ "owned_by": "aifreeforever",
335
+ }
336
+ ],
337
+ }
338
+
339
+ @app.post("/v1/chat/completions")
340
+ async def chat_completions(req: ChatRequest):
341
+ # Construire le prompt Γ  partir des messages
342
+ # On prend le dernier message user, ou on concatène tout
343
+ parts = []
344
+ for m in req.messages:
345
+ if m.role == "system":
346
+ parts.append(f"[System] {m.content}")
347
+ elif m.role == "user":
348
+ parts.append(f"{m.content}")
349
+ elif m.role == "assistant":
350
+ parts.append(f"[Assistant] {m.content}")
351
+ prompt = "\n\n".join(parts)
352
+
353
+ if not prompt.strip():
354
+ raise HTTPException(status_code=400, detail="Empty prompt")
355
+
356
+ # SΓ©maphore : 1 seule requΓͺte Playwright Γ  la fois
357
+ async with SEM:
358
+ try:
359
+ content = await chat(prompt)
360
+ except Exception as e:
361
+ raise HTTPException(status_code=500, detail=str(e))
362
+
363
+ if not content:
364
+ raise HTTPException(status_code=502, detail="No response from upstream")
365
+
366
+ model = req.model or MODEL_NAME
367
+
368
+ # Streaming
369
+ if req.stream:
370
+ return StreamingResponse(
371
+ _stream_chunks(content, model),
372
+ media_type="text/event-stream",
373
+ )
374
+
375
+ # Non-streaming
376
+ return _completion_response(content, model)