DeepSeek-OCR 2: Visual Causal Flow
Paper: arXiv 2601.20552 • Published • 68 upvotes
This is a port of deepseek-ai/DeepSeek-OCR-2 for the transformers library. 🤗

import torch
import requests
from PIL import Image
from transformers import AutoProcessor, AutoModelForImageTextToText

# Load the checkpoint in bfloat16 and let `device_map="auto"` place it on the
# available hardware.
repo = "thisisiron/DeepSeek-OCR-2-hf"
ocr_model = AutoModelForImageTextToText.from_pretrained(
    repo,
    torch_dtype=torch.bfloat16,
    device_map="auto",
)
ocr_processor = AutoProcessor.from_pretrained(repo)

# Fetch a sample document image and normalize it to RGB.
image_url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_got_ocr/resolve/main/image_ocr.jpg"
page = Image.open(requests.get(image_url, stream=True).raw).convert("RGB")

# Build model inputs; floating-point tensors are cast to bfloat16 to match
# the model weights (integer tensors such as input_ids are left untouched).
batch = ocr_processor(images=page, text="<image>\nFree OCR.", return_tensors="pt")
batch = batch.to(ocr_model.device, dtype=torch.bfloat16)

# Greedy decoding; inference_mode disables autograd bookkeeping.
with torch.inference_mode():
    output_ids = ocr_model.generate(**batch, do_sample=False, max_new_tokens=4096)

# Skip the prompt tokens and decode only the newly generated text.
prompt_len = batch["input_ids"].shape[1]
print(ocr_processor.decode(output_ids[0, prompt_len:], skip_special_tokens=True))
The <|grounding|> token enables coordinate-aware output with <|ref|> and <|det|> tags.
import torch
import requests
from PIL import Image
from transformers import AutoProcessor, AutoModelForImageTextToText

# Load the checkpoint in bfloat16 with automatic device placement.
repo = "thisisiron/DeepSeek-OCR-2-hf"
ocr_model = AutoModelForImageTextToText.from_pretrained(
    repo,
    torch_dtype=torch.bfloat16,
    device_map="auto",
)
ocr_processor = AutoProcessor.from_pretrained(repo)

# Download a sample document image and normalize it to RGB.
image_url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_got_ocr/resolve/main/image_ocr.jpg"
page = Image.open(requests.get(image_url, stream=True).raw).convert("RGB")

# The <|grounding|> token in the prompt asks the model for coordinate-aware
# output. Float tensors are cast to bfloat16 to match the weights.
batch = ocr_processor(
    images=page,
    text="<image>\n<|grounding|>Convert the document to markdown.",
    return_tensors="pt",
)
batch = batch.to(ocr_model.device, dtype=torch.bfloat16)

# Greedy decoding under inference_mode (no autograd state).
with torch.inference_mode():
    output_ids = ocr_model.generate(**batch, do_sample=False, max_new_tokens=4096)

# Keep special tokens so the <|ref|>/<|det|> grounding tags survive decoding;
# slice off the prompt so only newly generated tokens are shown.
prompt_len = batch["input_ids"].shape[1]
markdown = ocr_processor.decode(output_ids[0, prompt_len:], skip_special_tokens=False)
print(markdown)
@article{wei2026deepseek,
title={DeepSeek-OCR 2: Visual Causal Flow},
author={Wei, Haoran and Sun, Yaofeng and Li, Yukun},
journal={arXiv preprint arXiv:2601.20552},
year={2026}
}