import os
import subprocess

import gradio as gr
import torch
from huggingface_hub import login
from kaggle_secrets import UserSecretsClient
from peft import PeftModel
from pynvml import (
    nvmlDeviceGetCount,
    nvmlDeviceGetHandleByIndex,
    nvmlDeviceGetMemoryInfo,
    nvmlInit,
)
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

# --- 1. Configuration and authentication ---
model_id = "Qwen/Qwen3.5-9B"
lora_repo = "decula/sd"
port = 7860
use_frpc = True
# Make sure this file has been uploaded to the Kaggle working directory
# (a sample layout is sketched at the end of this file)
frpconfigfile = "7680.ini"

try:
    user_secrets = UserSecretsClient()
    hf_token = user_secrets.get_secret("DE_HF")
    if hf_token:
        login(token=hf_token)
except Exception:
    print("Failed to fetch the HF token; falling back to public access")

# --- 2. GPU memory monitoring setup ---
try:
    nvmlInit()
    GPU_COUNT = nvmlDeviceGetCount()
    gpu_h0 = nvmlDeviceGetHandleByIndex(0)
    gpu_h1 = nvmlDeviceGetHandleByIndex(1) if GPU_COUNT > 1 else None
except Exception as e:
    print(f"NVML initialization failed: {e}")
    GPU_COUNT = 0

# --- 3. Load the model (keeps the logic that passed testing) ---
print(f"Deploying model across both GPUs: {model_id}...")
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_use_double_quant=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.float16,
)

# Cap each card at 11 GiB, leaving ~4 GiB for the KV cache and the Gradio process
max_memory = {0: "11GiB", 1: "11GiB", "cpu": "20GiB"}

base_model = AutoModelForCausalLM.from_pretrained(
    model_id,
    quantization_config=bnb_config,
    device_map="auto",
    max_memory=max_memory,
    trust_remote_code=True,
    torch_dtype=torch.float16,
    low_cpu_mem_usage=True,
)

try:
    model = PeftModel.from_pretrained(base_model, lora_repo)
    model.eval()
    print("✅ Adapter loaded successfully")
except Exception as e:
    print(f"❌ Adapter loading failed: {e}")
    model = base_model

tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
tokenizer.pad_token = tokenizer.eos_token

# --- 4. frpc launcher ---
def start_frpc(port, config_file, enabled):
    if not enabled:
        return
    if os.path.exists("./frpc"):
        subprocess.run(["chmod", "+x", "./frpc"], check=True)
        print(f"Starting frpc to forward port {port}...")
        subprocess.Popen(["./frpc", "-c", config_file])
    else:
        print("Error: frpc executable not found in the current directory")

start_frpc(port, frpconfigfile, use_frpc)

# --- 5. Inference logic ---
def evaluate(prompt, max_tokens=512, temperature=0.7, top_p=0.8, repetition_penalty=1.1):
    # Build the chat-formatted prompt
    messages = [{"role": "user", "content": prompt}]
    text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    inputs = tokenizer(text, return_tensors="pt").to(model.device)

    with torch.no_grad():
        # Non-streaming generation: the full completion is decoded once generate()
        # returns (a token-by-token variant is sketched after this function)
        output_ids = model.generate(
            **inputs,
            max_new_tokens=int(max_tokens),
            do_sample=True,
            temperature=float(temperature),
            top_p=float(top_p),
            repetition_penalty=float(repetition_penalty),
            pad_token_id=tokenizer.pad_token_id,
            eos_token_id=tokenizer.eos_token_id,
        )

    response = tokenizer.decode(output_ids[0][inputs.input_ids.shape[1]:], skip_special_tokens=True)

    # Log GPU memory usage after each request
    if GPU_COUNT > 0:
        info0 = nvmlDeviceGetMemoryInfo(gpu_h0)
        if gpu_h1 is not None:
            info1 = nvmlDeviceGetMemoryInfo(gpu_h1)
            print(f"GPU0: {info0.used / 1024**2:.0f}MB / GPU1: {info1.used / 1024**2:.0f}MB")
        else:
            print(f"GPU0: {info0.used / 1024**2:.0f}MB")

    return response
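
# The evaluate() above returns the whole reply at once. If the output box should
# fill token by token instead, Gradio also accepts a generator function in the same
# click binding. A minimal sketch, assuming transformers' TextIteratorStreamer;
# the name evaluate_stream is ours, not from the original script:
from threading import Thread

from transformers import TextIteratorStreamer

def evaluate_stream(prompt, max_tokens=512, temperature=0.7, top_p=0.8, repetition_penalty=1.1):
    messages = [{"role": "user", "content": prompt}]
    text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    inputs = tokenizer(text, return_tensors="pt").to(model.device)
    # The streamer yields decoded text fragments as generate() produces tokens
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
    generation_kwargs = dict(
        **inputs,
        streamer=streamer,
        max_new_tokens=int(max_tokens),
        do_sample=True,
        temperature=float(temperature),
        top_p=float(top_p),
        repetition_penalty=float(repetition_penalty),
        pad_token_id=tokenizer.pad_token_id,
    )
    # generate() blocks until decoding finishes, so run it in a background
    # thread and consume the streamer on this one
    Thread(target=model.generate, kwargs=generation_kwargs).start()
    partial = ""
    for fragment in streamer:
        partial += fragment
        yield partial  # Gradio re-renders the output box on every yield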

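# Optional sanity check: models loaded through accelerate's device_map gain an
# hf_device_map attribute; printing it shows which layers landed on GPU 0, GPU 1,
# and CPU, confirming the 4-bit model really spans both T4s.
# print(base_model.hf_device_map)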
# --- 6. Gradio UI ---
with gr.Blocks(title="Qwen3.5-9B Dual-GPU GUI") as demo:
    gr.HTML("<h2 style='text-align: center;'>Qwen 3.5 9B + LoRA (Dual T4)</h2>")
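    # Left column: prompt box and sampling controls; right column: the model reply.
    # Component order in the click binding below must match evaluate()'s
    # positional parameters (max_tokens, temperature, top_p, repetition_penalty).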
") with gr.Row(): with gr.Column(): input_text = gr.Textbox(lines=4, label="输入问题", placeholder="请输入您想咨询的内容...") with gr.Row(): btn_submit = gr.Button("发送请求", variant="primary") btn_clear = gr.Button("重置") with gr.Accordion("生成参数配置", open=False): tk_count = gr.Slider(128, 2048, label="最大生成长度", step=128, value=512) temp = gr.Slider(0.1, 1.5, label="温度 (Temperature)", step=0.1, value=0.7) tp = gr.Slider(0.1, 1.0, label="Top P", step=0.05, value=0.8) rep_p = gr.Slider(1.0, 1.5, label="重复惩罚", step=0.05, value=1.1) with gr.Column(): output_text = gr.Textbox(lines=12, label="AI 回复", interactive=False) # 绑定事件 btn_submit.click( evaluate, inputs=[input_text, tk_count, temp, tp, rep_p], outputs=[output_text] ) btn_clear.click(lambda: ("", ""), outputs=[input_text, output_text]) # --- 7. 启动 --- if __name__ == "__main__": # share=False 因为我们要用自己的 frpc 进行穿透 demo.launch(server_port=port, share=False)