| import spaces
|
| import gradio as gr
|
| import platform
|
| import os;
|
| import socket;
|
| import torch
|
|
|
# Target device for the demo; this variant of the app requires CUDA
# (the error message below points users to the CPU Dockerfile otherwise).
device = "cuda"

if not torch.cuda.is_available() and device == "cuda":
    raise RuntimeError("CUDA device unavailable, please use Dockerfile.cpu instead.")

# Small demo tensor; sysinfo() reports where it lives and derives values from it.
RandomTensor = torch.randn(1, 2)

print("Putting Tensor in device ...", device)
# BUG FIX: Tensor.to() is NOT in-place — it returns a new tensor and the
# original code discarded the result, leaving RandomTensor on the CPU.
# Rebind the name so the tensor actually moves to the target device.
RandomTensor = RandomTensor.to(device)
|
|
|
def sysinfo(newdev=device):
    """Return a multi-line report of host identity and tensor device placement.

    Args:
        newdev: device to move the module-level tensor to for this report
            (defaults to the module-level ``device`` captured at def time).
    """
    # Device index of the module-level tensor (-1 means it lives on the CPU).
    prior_device = RandomTensor.get_device()

    # Move to the requested device, take the per-row L2 norm (kept dim),
    # add a trailing axis; the second .to() is redundant but preserved.
    sample = RandomTensor.to(newdev).norm(p=2, dim=1, keepdim=True).unsqueeze(-1).to(newdev)

    # Round-trip back to host memory as half-precision Python floats.
    host_values = sample.cpu().squeeze().half().tolist()

    return f"""

hostname: {platform.node()} {socket.gethostname()}

dev cur: {prior_device}

dev ini: {device}

dev new: {newdev}

tensor: {sample}

toCpu: {host_values}

"""
|
|
|
@spaces.GPU
def gpu():
    """Report system info from inside a ZeroGPU context (default device)."""
    return sysinfo()
|
|
|
|
|
def nogpu():
    """Report system info with the tensor forced onto the CPU."""
    return sysinfo("cpu")
|
|
|
|
|
|
|
with gr.Blocks() as demo:
    # One output textbox per mode, plus a trigger button for each.
    outgpu = gr.Textbox(lines=5)
    outnpu = gr.Textbox(lines=5)
    btngpu = gr.Button(value="gpu")
    btngpun = gr.Button(value="ngpu")

    # Wire each button to its handler; handlers take no inputs.
    btngpu.click(fn=gpu, inputs=None, outputs=[outgpu])
    btngpun.click(fn=nogpu, inputs=None, outputs=[outnpu])
|
|
|
|
|
|
|
if __name__ == "__main__":
    # Bind on all interfaces at the standard HF Spaces port; no public
    # share link and no debug mode.
    demo.launch(
        server_name="0.0.0.0",
        server_port=7860,
        share=False,
        debug=False,
    )
|
|
|