# syntax=docker/dockerfile:1

# Diagnostic/runtime image for cuda_first_repo on AMD GPUs, built on the
# ROCm variant of the vLLM OpenAI server image.
# NOTE(review): ':latest' is not reproducible (hadolint DL3007) — pin a
# specific tag or digest once the desired release is known.
FROM vllm/vllm-openai-rocm:latest

WORKDIR /workspace/cuda_first_repo

# Bring the whole build context into the image.
# NOTE(review): copying everything before installing deps means any source
# change invalidates the pip layer below. If requirements.txt is guaranteed
# to exist, COPY it alone first to keep the dependency layer cached; a
# wildcard COPY can't be used here because a glob matching nothing fails
# the build. Also add a .dockerignore (.git, caches, .env) to shrink the
# context and avoid leaking local files.
COPY . /workspace/cuda_first_repo

# Install Python dependencies only when the repo ships a requirements file;
# --no-cache-dir keeps pip's download cache out of the layer (DL3042).
RUN if [ -f requirements.txt ]; then \
        pip install --no-cache-dir -r requirements.txt; \
    fi

# Runtime GPU configuration, grouped into one ENV instruction:
#   HIP_VISIBLE_DEVICES=0        — expose only the first ROCm device.
#   PYTORCH_HIP_ALLOC_CONF=…     — let the HIP caching allocator use
#                                  expandable segments instead of fixed blocks.
ENV HIP_VISIBLE_DEVICES=0 \
    PYTORCH_HIP_ALLOC_CONF=expandable_segments:True

# NOTE(review): no USER directive — container runs as whatever user the base
# image defines (likely root). Switching to a non-root user would need
# video/render group membership for GPU device access; confirm against the
# deployment runtime before adding one.

# Smoke test (exec form, so the Python process is PID 1): print the torch
# version and whether a GPU is visible (on ROCm builds of torch, the
# torch.cuda API reports HIP devices).
CMD ["python", "-c", "import torch; print('torch', torch.__version__); print('rocm_gpu_available', torch.cuda.is_available())"]