# sd/deepr1-14b.py — DeepSeek R1 14B Gradio demo
# (original header residue: user "decula", branch "2t4", revision 9ab0715)
import os
import torch
import gradio as gr
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer
from pynvml import *
from torch.cuda.amp import autocast # 导入混合精度训练
# Memory-management hint: let the CUDA caching allocator grow segments
# instead of failing on fragmentation-induced OOMs.
os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'expandable_segments:True'

# Flipped to True below if NVML reports at least one GPU.
HAS_GPU = False

# UI title, model id, and the context-length cap advertised in the demo.
ctx_limit = 20000
title = "DeepSeek R1 14B"
model_name = "deepseek-ai/DeepSeek-R1-Distill-Qwen-14B"

# Probe GPUs through NVML; any NVML failure leaves us on the CPU path.
try:
    nvmlInit()
    GPU_COUNT = nvmlDeviceGetCount()
    if GPU_COUNT > 0:
        HAS_GPU = True
        # One NVML handle per device, used later for VRAM reporting.
        gpu_h = []
        for idx in range(GPU_COUNT):
            gpu_h.append(nvmlDeviceGetHandleByIndex(idx))
except NVMLError as error:
    print(error)
# Load the tokenizer and model.
tokenizer = AutoTokenizer.from_pretrained(model_name)
# FP16 halves VRAM vs FP32. With more than one GPU, device_map="auto"
# shards the model across devices at load time.
# Bug fix: the previous code additionally wrapped the sharded model in
# torch.nn.DataParallel and called .to("cuda") — device_map dispatch has
# already placed every layer, and the DataParallel wrapper breaks
# `model.generate` / `model.device` access used in evaluate() below.
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float16,  # FP16 to reduce memory footprint
    device_map="auto" if HAS_GPU and GPU_COUNT > 1 else None,
)
# Single-device paths: put the whole model on one GPU, or stay on CPU.
if HAS_GPU:
    if GPU_COUNT <= 1:
        model = model.to("cuda")
else:
    model = model.to("cpu")
# Prompt generation
def generate_prompt(instruction, input=""):
    """Render *instruction* (and optional *input*) into a prompt string.

    With a non-empty *input*, an Instruction/Input/Response template is
    produced; otherwise a short chat transcript primes the assistant
    before the user's instruction.
    """
    def _normalize(text):
        # Unify Windows line endings, then collapse one level of blank lines.
        return text.strip().replace('\r\n', '\n').replace('\n\n', '\n')

    instruction = _normalize(instruction)
    input = _normalize(input)
    if not input:
        return (
            "User: hi\n"
            "Assistant: Hi. I am your assistant and I will provide expert full response in full details. Please feel free to ask any question and I will always answer it.\n"
            f"User: {instruction}\n"
            "Assistant:"
        )
    return f"Instruction: {instruction}\nInput: {input}\nResponse:"
# Evaluation logic
def evaluate(
    ctx,
    token_count=200,
    temperature=1.0,
    top_p=0.7,
    presencePenalty=0.1,
    countPenalty=0.1,
):
    """Generate a completion for *ctx* and return the decoded text.

    Args:
        ctx: Full prompt text fed to the model.
        token_count: Maximum number of NEW tokens to generate. (Bug fix:
            previously passed as ``max_length``, which counts the prompt
            too, so a long prompt could leave no room for a reply even
            though the UI advertises this knob as "Max Tokens".)
        temperature: Sampling temperature forwarded to ``generate``.
        top_p: Nucleus-sampling cutoff forwarded to ``generate``.
        presencePenalty: Accepted for interface compatibility; HF
            ``generate`` has no direct equivalent, so currently unused.
        countPenalty: Same — accepted but unused.

    Returns:
        The decoded sequence (special tokens stripped, whitespace-trimmed).
        NOTE(review): ``decode(outputs[0])`` includes the prompt tokens in
        the returned text — confirm whether callers expect completion-only.
    """
    print(ctx)
    inputs = tokenizer(ctx, return_tensors="pt").to(model.device)
    gen_kwargs = dict(
        # Explicit attention mask avoids pad/eos ambiguity warnings.
        attention_mask=inputs.attention_mask,
        max_new_tokens=token_count,
        temperature=temperature,
        top_p=top_p,
        do_sample=True,
        num_return_sequences=1,
    )
    # inference_mode: no autograd bookkeeping during generation.
    with torch.inference_mode():
        if HAS_GPU:
            # Mixed precision is only meaningful on CUDA; skip it on CPU
            # (the old code entered autocast unconditionally).
            with autocast():
                outputs = model.generate(inputs.input_ids, **gen_kwargs)
        else:
            outputs = model.generate(inputs.input_ids, **gen_kwargs)
    out_str = tokenizer.decode(outputs[0], skip_special_tokens=True)
    if HAS_GPU:
        # Report per-GPU VRAM via the NVML handles created at startup.
        for i in range(GPU_COUNT):
            gpu_info = nvmlDeviceGetMemoryInfo(gpu_h[i])
            print(f'GPU {i} vram {gpu_info.total} used {gpu_info.used} free {gpu_info.free}')
        torch.cuda.empty_cache()
    return out_str.strip()
# Examples and gradio blocks
# Each row mirrors evaluate()'s parameter order:
# [prompt, token_count, temperature, top_p, presencePenalty, countPenalty].
# NOTE(review): this list is never passed to the gr.Dataset below (no
# samples= argument), so it appears to be dead data — confirm intended wiring.
examples = [
["Assistant: Sure! Here is a very detailed plan to create flying pigs:", 333, 1, 0.3, 0, 1],
["Assistant: Sure! Here are some ideas for FTL drive:", 333, 1, 0.3, 0, 1],
[generate_prompt("Tell me about ravens."), 333, 1, 0.3, 0, 1],
[generate_prompt("Écrivez un programme Python pour miner 1 Bitcoin, avec des commentaires."), 333, 1, 0.3, 0, 1],
[generate_prompt("東京で訪れるべき素晴らしい場所とその紹介をいくつか挙げてください。"), 333, 1, 0.3, 0, 1],
[generate_prompt("Write a story using the following information.", "A man named Alex chops a tree down."), 333, 1, 0.3, 0, 1],
["Assistant: Here is a very detailed plan to kill all mosquitoes:", 333, 1, 0.3, 0, 1],
['''Edward: I am Edward Elric from fullmetal alchemist. I am in the world of full metal alchemist and know nothing of the real world.
User: Hello Edward. What have you been up to recently?
Edward:''', 333, 1, 0.3, 0, 1],
[generate_prompt(""), 333, 1, 0.3, 0, 1],
['''''', 333, 1, 0.3, 0, 1],
]
##########################################################################
# Optional frp reverse-proxy bootstrap to expose the local Gradio port.
port = 7860
use_frpc = True
# NOTE(review): the filename digits (7680) don't match the port (7860) —
# possibly a transposition; confirm which .ini is intended.
frpconfigfile = "7680.ini"
import subprocess
def install_Frpc(port, frpconfigfile, use_frpc):
    """Make ./frpc executable and launch it with *frpconfigfile*.

    No-op when *use_frpc* is falsy; always returns None.
    """
    if use_frpc:
        # List-form argv (shell=False): no shell-injection surface.
        subprocess.run(['chmod', '+x', './frpc'], check=True)
        print(f'正在启动frp ,端口{port}')
        # Popen keeps frpc running in the background while Gradio serves.
        subprocess.Popen(['./frpc', '-c', frpconfigfile])
# Bug fix: previously called with the hard-coded string '7860', ignoring
# the `port` variable defined above; pass the variable so log and config
# stay in sync with a single source of truth.
install_Frpc(port, frpconfigfile, use_frpc)
# Gradio blocks: single tab with a prompt box, submit/clear buttons, output.
with gr.Blocks(title=title) as demo:
    # Bug fix: heading and description hard-coded "DeepSeek R1 7B" while the
    # loaded model (and `title`) is the 14B distill; derive both from `title`.
    gr.HTML(f"<div style=\"text-align: center;\">\n<h1>{title}</h1>\n</div>")
    with gr.Tab("Raw Generation"):
        gr.Markdown(f"This is {title} - a powerful language model. Supports all 100+ world languages and code. Demo limited to ctxlen {ctx_limit}.")
        with gr.Row():
            with gr.Column():
                prompt = gr.Textbox(lines=2, label="Prompt", value="")
            with gr.Column():
                with gr.Row():
                    submit = gr.Button("Submit", variant="primary")
                    clear = gr.Button("Clear", variant="secondary")
                output = gr.Textbox(label="Output", lines=5)
        # NOTE(review): `examples` is never passed as samples= here, so the
        # dataset renders empty; six headers are declared for one component —
        # confirm the intended wiring.
        data = gr.Dataset(components=[prompt], label="Example Instructions", headers=["Prompt", "Max Tokens", "Temperature", "Top P", "Presence Penalty", "Count Penalty"])
        # Only the prompt is wired in; evaluate()'s other args use defaults.
        submit.click(evaluate, [prompt], [output])
        clear.click(lambda: None, [], [output])
        data.click(lambda x: x, [data], [prompt])
# Gradio launch — local only (share=False); external exposure is via frp.
demo.launch(share=False)