# Hugging Face Space Dockerfile — commit 895724f (verified) by anugrah55:
# "Drop deprecated TRANSFORMERS_CACHE env var"
# syntax=docker/dockerfile:1
# Slim, well-tested CUDA + PyTorch base. Avoids HF transformers-pytorch-gpu's
# bloat and unsloth's CUDA-version sensitivity.
FROM pytorch/pytorch:2.5.1-cuda12.1-cudnn9-runtime

# Runtime environment:
#   PYTHONUNBUFFERED       - stream logs immediately (no stdout buffering)
#   PIP_NO_CACHE_DIR       - keep pip's wheel cache out of image layers
#   HF_HOME                - route all Hugging Face caches to the persistent
#                            /data volume (successor to the deprecated
#                            TRANSFORMERS_CACHE variable)
#   TOKENIZERS_PARALLELISM - silence the tokenizers fork warning under DataLoader
ENV PYTHONUNBUFFERED=1 \
    PIP_NO_CACHE_DIR=1 \
    PIP_DISABLE_PIP_VERSION_CHECK=1 \
    HF_HOME=/data/.cache/huggingface \
    TOKENIZERS_PARALLELISM=false

WORKDIR /app

# System deps for bitsandbytes / build. DEBIAN_FRONTEND is set inline (not via
# ENV) so it does not leak into the runtime environment; apt lists are purged
# in the same layer so they never persist in the image.
RUN apt-get update \
    && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
        build-essential \
        curl \
        git \
    && rm -rf /var/lib/apt/lists/*

# Install Python deps before copying project code so this slow layer is reused
# whenever only the source changes.
COPY requirements.txt /app/
RUN pip install --no-cache-dir -r requirements.txt

# Project code
COPY opensleuth_train /app/opensleuth_train
COPY train.py /app/
COPY entrypoint.sh /app/

# Run as the non-root UID-1000 user that HF Spaces expects; pre-create the
# training output and HF cache dirs on /data and hand ownership of both /app
# and /data to that user so the job can write checkpoints and cached models.
RUN chmod +x /app/entrypoint.sh \
    && useradd -m -u 1000 user \
    && mkdir -p /data/opensleuth-grpo /data/.cache/huggingface \
    && chown -R user:user /app /data
USER user

# HF Spaces health probe expects the container to expose a port; keep it open
# so the orchestrator considers us alive while training runs.
EXPOSE 7860
CMD ["/app/entrypoint.sh"]