lodestones committed 3625530 (parent: d1eb9c4): Upload inference_tagger_standalone.py

Files changed (1): inference_tagger_standalone.py — new file, +460 lines
+ """DINOv3 ViT-H/16+ Tagger — Fully Standalone Inference Script
2
+
3
+ Zero dependency on transformers, trainer code, or any internal module.
4
+ Only requires: torch, torchvision, safetensors, Pillow, requests.
5
+
6
+ pip install torch torchvision safetensors Pillow requests
7
+
8
+ The DINOv3 ViT-H/16+ architecture is implemented directly here, with weights
9
+ loaded from a .safetensors checkpoint. The state-dict key names match the
10
+ HuggingFace transformers layout exactly so checkpoints are interchangeable.
11
+
12
+ Usage
13
+ -----
14
+ # Single image, top-30 tags:
15
+ python inference_tagger_standalone.py \
16
+ --checkpoint tagger_checkpoints/2026-03-28_22-57-47.safetensors \
17
+ --vocab tagger_vocab.json \
18
+ --images photo.jpg \
19
+ --topk 30
20
+
21
+ # URL input:
22
+ python inference_tagger_standalone.py \
23
+ --checkpoint tagger_checkpoints/2026-03-28_22-57-47.safetensors \
24
+ --vocab tagger_vocab.json \
25
+ --images https://example.com/photo.jpg
26
+
27
+ # Threshold instead of top-k:
28
+ python inference_tagger_standalone.py ... --threshold 0.4
29
+
30
+ # Pipe-friendly comma-separated tags (one line per image):
31
+ python inference_tagger_standalone.py ... --format tags
32
+
33
+ # JSON output:
34
+ python inference_tagger_standalone.py ... --format json
35
+
36
+ Output formats (--format)
37
+ -------------------------
38
+ pretty (default) — human-readable table with scores
39
+ tags — comma-separated tag string, one line per image
40
+ json — JSON array of {file, tags: [{tag, score}]} objects
41
+ """

from __future__ import annotations

import argparse
import json
import math
import sys
from functools import lru_cache
from io import BytesIO
from pathlib import Path

import requests
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms.v2 as v2
from PIL import Image
from safetensors.torch import load_file


# =============================================================================
# DINOv3 ViT-H/16+ — hardcoded architecture
# All hyperparameters match facebook/dinov3-vith16plus-pretrain-lvd1689m
# =============================================================================

D_MODEL = 1280
N_HEADS = 20
HEAD_DIM = D_MODEL // N_HEADS  # 64
N_LAYERS = 32
D_FFN = 5120
N_REGISTERS = 4
PATCH_SIZE = 16
ROPE_THETA = 100.0
ROPE_RESCALE = 2.0  # pos_embed_rescale applied at inference
LN_EPS = 1e-5
LAYERSCALE = 1.0

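# Illustrative token count derived from the constants above: a 1024x768 input
# yields (1024 // 16) * (768 // 16) = 64 * 48 = 3072 patch tokens, plus 1 CLS
# token and N_REGISTERS = 4 register tokens, i.e. a sequence length of 3077.
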
# ---------------------------------------------------------------------------
# RoPE helpers
# ---------------------------------------------------------------------------

@lru_cache(maxsize=32)
def _patch_coords_cached(h: int, w: int, device_str: str) -> torch.Tensor:
    """Normalised [-1,+1] patch-centre coordinates (float32, cached)."""
    device = torch.device(device_str)
    cy = torch.arange(0.5, h, dtype=torch.float32, device=device) / h
    cx = torch.arange(0.5, w, dtype=torch.float32, device=device) / w
    coords = torch.stack(torch.meshgrid(cy, cx, indexing="ij"), dim=-1).flatten(0, 1)
    coords = 2.0 * coords - 1.0  # [0,1] → [-1,+1]
    coords = coords * ROPE_RESCALE
    return coords  # [h*w, 2]


def _build_rope(h_patches: int, w_patches: int,
                dtype: torch.dtype, device: torch.device):
    """Return (cos, sin) of shape [1, 1, h*w, HEAD_DIM] for broadcasting."""
    coords = _patch_coords_cached(h_patches, w_patches, str(device))  # [P, 2]
    inv_freq = 1.0 / (ROPE_THETA ** torch.arange(
        0, 1, 4 / HEAD_DIM, dtype=torch.float32, device=device))  # [D/4]
    angles = 2 * math.pi * coords[:, :, None] * inv_freq[None, None, :]  # [P, 2, D/4]
    angles = angles.flatten(1, 2).tile(2)  # [P, D]
    cos = torch.cos(angles).to(dtype).unsqueeze(0).unsqueeze(0)  # [1,1,P,D]
    sin = torch.sin(angles).to(dtype).unsqueeze(0).unsqueeze(0)
    return cos, sin


def _rotate_half(x: torch.Tensor) -> torch.Tensor:
    h = x.shape[-1] // 2
    return torch.cat((-x[..., h:], x[..., :h]), dim=-1)


def _apply_rope(q: torch.Tensor, k: torch.Tensor,
                cos: torch.Tensor, sin: torch.Tensor):
    """Apply RoPE only to patch tokens (skip CLS + register prefix)."""
    n_pre = 1 + N_REGISTERS
    q_pre, q_pat = q[..., :n_pre, :], q[..., n_pre:, :]
    k_pre, k_pat = k[..., :n_pre, :], k[..., n_pre:, :]
    q_pat = q_pat * cos + _rotate_half(q_pat) * sin
    k_pat = k_pat * cos + _rotate_half(k_pat) * sin
    return torch.cat([q_pre, q_pat], dim=-2), torch.cat([k_pre, k_pat], dim=-2)

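# Shape sanity check (a quick sketch, not executed on import): for a 4x4 patch
# grid the helpers produce one angle per patch token and per head-dim channel,
# broadcastable over batch and heads:
#
#     cos, sin = _build_rope(4, 4, torch.float32, torch.device("cpu"))
#     assert cos.shape == (1, 1, 16, HEAD_DIM)  # 16 patches, HEAD_DIM = 64
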
# ---------------------------------------------------------------------------
# Building blocks
# ---------------------------------------------------------------------------

class _Attention(nn.Module):
    def __init__(self):
        super().__init__()
        self.q_proj = nn.Linear(D_MODEL, D_MODEL, bias=True)
        self.k_proj = nn.Linear(D_MODEL, D_MODEL, bias=False)
        self.v_proj = nn.Linear(D_MODEL, D_MODEL, bias=True)
        self.o_proj = nn.Linear(D_MODEL, D_MODEL, bias=True)

    def forward(self, x: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor) -> torch.Tensor:
        B, S, _ = x.shape
        q = self.q_proj(x).view(B, S, N_HEADS, HEAD_DIM).transpose(1, 2)
        k = self.k_proj(x).view(B, S, N_HEADS, HEAD_DIM).transpose(1, 2)
        v = self.v_proj(x).view(B, S, N_HEADS, HEAD_DIM).transpose(1, 2)
        q, k = _apply_rope(q, k, cos, sin)
        out = F.scaled_dot_product_attention(q, k, v, scale=HEAD_DIM ** -0.5)
        return self.o_proj(out.transpose(1, 2).reshape(B, S, D_MODEL))


class _GatedMLP(nn.Module):
    def __init__(self):
        super().__init__()
        self.gate_proj = nn.Linear(D_MODEL, D_FFN, bias=True)
        self.up_proj = nn.Linear(D_MODEL, D_FFN, bias=True)
        self.down_proj = nn.Linear(D_FFN, D_MODEL, bias=True)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # SwiGLU-style gating: down(silu(gate(x)) * up(x))
        return self.down_proj(F.silu(self.gate_proj(x)) * self.up_proj(x))


class _Block(nn.Module):
    def __init__(self):
        super().__init__()
        self.norm1 = nn.LayerNorm(D_MODEL, eps=LN_EPS)
        self.attention = _Attention()
        self.layer_scale1 = nn.Parameter(torch.full((D_MODEL,), LAYERSCALE))
        self.norm2 = nn.LayerNorm(D_MODEL, eps=LN_EPS)
        self.mlp = _GatedMLP()
        self.layer_scale2 = nn.Parameter(torch.full((D_MODEL,), LAYERSCALE))

    def forward(self, x: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor) -> torch.Tensor:
        # Pre-norm residual layout with LayerScale on both branches
        x = x + self.attention(self.norm1(x), cos, sin) * self.layer_scale1
        x = x + self.mlp(self.norm2(x)) * self.layer_scale2
        return x

# ---------------------------------------------------------------------------
# Full backbone
# ---------------------------------------------------------------------------

class DINOv3ViTH(nn.Module):
    """DINOv3 ViT-H/16+ backbone.

    Accepts any H, W that are multiples of 16.
    Returns last_hidden_state [B, 1+R+P, D_MODEL].
    Token layout: [CLS, reg_0..reg_3, patch_0..patch_N].

    State-dict keys are intentionally identical to the HuggingFace
    transformers layout so .safetensors checkpoints load without remapping.
    """

    def __init__(self):
        super().__init__()
        # These names must match HF exactly
        self.embeddings = _Embeddings()
        self.layer = nn.ModuleList([_Block() for _ in range(N_LAYERS)])
        self.norm = nn.LayerNorm(D_MODEL, eps=LN_EPS)

    def _load_from_state_dict(self, state_dict, prefix, local_metadata,
                              strict, missing_keys, unexpected_keys, error_msgs):
        # HF stores layer_scale as a sub-module with a "lambda1" parameter;
        # we store it as a plain Parameter directly on _Block.
        # Remap "layer.i.layer_scale{1,2}.lambda1" → "layer.i.layer_scale{1,2}"
        for k in list(state_dict.keys()):
            if k.startswith(prefix) and ".layer_scale" in k and k.endswith(".lambda1"):
                new_k = k[:-len(".lambda1")]
                state_dict[new_k] = state_dict.pop(k)
        # Drop rope_embeddings buffer (computed on-the-fly)
        for k in list(state_dict.keys()):
            if k.startswith(prefix) and "rope_embeddings" in k:
                state_dict.pop(k)
        super()._load_from_state_dict(
            state_dict, prefix, local_metadata, strict,
            missing_keys, unexpected_keys, error_msgs)

    def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
        B, _, H, W = pixel_values.shape
        x = self.embeddings(pixel_values)  # [B, 1+R+P, D]

        h_p, w_p = H // PATCH_SIZE, W // PATCH_SIZE
        cos, sin = _build_rope(h_p, w_p, x.dtype, pixel_values.device)

        for block in self.layer:
            x = block(x, cos, sin)

        return self.norm(x)


class _Embeddings(nn.Module):
    """Patch + CLS + register token embeddings.

    Key names match HF: embeddings.cls_token, embeddings.register_tokens,
    embeddings.patch_embeddings.{weight,bias}.
    """

    def __init__(self):
        super().__init__()
        self.cls_token = nn.Parameter(torch.empty(1, 1, D_MODEL))
        self.mask_token = nn.Parameter(torch.zeros(1, 1, D_MODEL))  # unused at inference
        self.register_tokens = nn.Parameter(torch.empty(1, N_REGISTERS, D_MODEL))
        self.patch_embeddings = nn.Conv2d(3, D_MODEL, kernel_size=PATCH_SIZE, stride=PATCH_SIZE)

    def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
        B = pixel_values.shape[0]
        dtype = self.patch_embeddings.weight.dtype
        patches = self.patch_embeddings(pixel_values.to(dtype)).flatten(2).transpose(1, 2)
        cls = self.cls_token.expand(B, -1, -1)
        regs = self.register_tokens.expand(B, -1, -1)
        return torch.cat([cls, regs, patches], dim=1)

# =============================================================================
# Tagger head
# =============================================================================

class DINOv3Tagger(nn.Module):
    """DINOv3 ViT-H/16+ backbone + linear projection head.

    features = concat(CLS, reg_0..reg_3) → [B, (1+R)*D]
    projection: Linear → [B, num_tags]
    """

    def __init__(self, num_tags: int, projection_bias: bool = False):
        super().__init__()
        self.backbone = DINOv3ViTH()
        self.projection = nn.Linear((1 + N_REGISTERS) * D_MODEL, num_tags, bias=projection_bias)

    def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
        hidden = self.backbone(pixel_values)                # [B, S, D]
        cls = hidden[:, 0, :]                               # [B, D]
        regs = hidden[:, 1:1 + N_REGISTERS, :].flatten(1)   # [B, R*D]
        features = torch.cat([cls, regs], dim=-1)           # [B, (1+R)*D]
        return self.projection(features.float())            # fp32 for stability

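# Width check for the head: concat(CLS, 4 registers) gives (1 + 4) * 1280 =
# 6400 input features, so self.projection.weight has shape [num_tags, 6400].
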
# =============================================================================
# Image preprocessing
# =============================================================================

_IMAGENET_MEAN = [0.485, 0.456, 0.406]
_IMAGENET_STD = [0.229, 0.224, 0.225]


def _snap(x: int, m: int) -> int:
    """Round x down to a multiple of m (but at least m)."""
    return max(m, (x // m) * m)


def _open_image(source) -> Image.Image:
    s = str(source)
    if s.startswith("http://") or s.startswith("https://"):
        r = requests.get(s, timeout=30)
        r.raise_for_status()
        return Image.open(BytesIO(r.content)).convert("RGB")
    return Image.open(source).convert("RGB")


def preprocess_image(source, max_size: int = 1024) -> torch.Tensor:
    """Load and preprocess an image → [1, 3, H, W] float32, ImageNet-normalised."""
    img = _open_image(source)
    w, h = img.size
    scale = min(1.0, max_size / max(w, h))
    new_w = _snap(round(w * scale), PATCH_SIZE)
    new_h = _snap(round(h * scale), PATCH_SIZE)
    return v2.Compose([
        v2.Resize((new_h, new_w), interpolation=v2.InterpolationMode.LANCZOS),
        v2.ToImage(),
        v2.ToDtype(torch.float32, scale=True),
        v2.Normalize(mean=_IMAGENET_MEAN, std=_IMAGENET_STD),
    ])(img).unsqueeze(0)

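# Sizing walkthrough (illustrative numbers): a 4032x3024 photo with
# max_size=1024 scales by 1024/4032 to 1024x768; both sides are already
# multiples of 16, so _snap leaves them unchanged and the backbone sees a
# 64x48 patch grid. For a side that is not a multiple, _snap(1000, 16) == 992.
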
# =============================================================================
# Tagger wrapper
# =============================================================================

class Tagger:
    """Inference wrapper for DINOv3Tagger (ViT-H/16+).

    Parameters
    ----------
    checkpoint_path : str
        Path to a .safetensors or .pth checkpoint saved by TaggerTrainer.
    vocab_path : str
        Path to tagger_vocab.json ({"idx2tag": [...]}).
    device : str
        "cuda", "cuda:0", "cpu", etc.
    dtype : torch.dtype
        bfloat16 recommended on Ampere+; float16 for older GPUs; float32 for CPU.
    max_size : int
        Long-edge cap in pixels before feeding to the model.
    """

    def __init__(
        self,
        checkpoint_path: str,
        vocab_path: str,
        device: str = "cuda",
        dtype: torch.dtype = torch.bfloat16,
        max_size: int = 1024,
    ):
        # Fall back to CPU when a GPU device is requested but CUDA is unavailable
        self.device = torch.device(device if torch.cuda.is_available() or device == "cpu" else "cpu")
        self.dtype = dtype
        self.max_size = max_size

        with open(vocab_path) as f:
            data = json.load(f)
        self.idx2tag: list[str] = data["idx2tag"]
        self.num_tags = len(self.idx2tag)
        print(f"[Tagger] Vocabulary: {self.num_tags:,} tags")

        self.model = DINOv3Tagger(num_tags=self.num_tags)

        print(f"[Tagger] Loading checkpoint: {checkpoint_path}")
        if checkpoint_path.endswith((".safetensors", ".sft")):
            sd = load_file(checkpoint_path, device=str(self.device))
        else:
            sd = torch.load(checkpoint_path, map_location=str(self.device))

        missing, unexpected = self.model.load_state_dict(sd, strict=False, assign=True)
        if missing:
            print(f"[Tagger] Missing keys ({len(missing)}): {missing[:5]}{'...' if len(missing) > 5 else ''}")
        if unexpected:
            print(f"[Tagger] Unexpected keys ({len(unexpected)}): {unexpected[:5]}{'...' if len(unexpected) > 5 else ''}")

        self.model.backbone = self.model.backbone.to(dtype=dtype)
        self.model = self.model.to(self.device)
        self.model.eval()
        print(f"[Tagger] Ready on {self.device} ({dtype})")

    @torch.no_grad()
    def predict(self, image, topk: int | None = 30,
                threshold: float | None = None) -> list[tuple[str, float]]:
        """Tag a single image (local path or URL).

        Specify either topk or threshold; topk takes precedence if both are
        given, so pass topk=None to select by threshold alone. Returns
        [(tag, score), ...] sorted by descending score.
        """
        if topk is None and threshold is None:
            topk = 30

        pv = preprocess_image(image, max_size=self.max_size).to(self.device)
        with torch.autocast(device_type=self.device.type, dtype=self.dtype):
            logits = self.model(pv)[0]
        scores = torch.sigmoid(logits.float())

        if topk is not None:
            values, indices = scores.topk(min(topk, self.num_tags))
        else:
            assert threshold is not None
            indices = (scores >= threshold).nonzero(as_tuple=True)[0]
            values = scores[indices]
            order = values.argsort(descending=True)
            indices, values = indices[order], values[order]

        return [(self.idx2tag[i], float(v)) for i, v in zip(indices.tolist(), values.tolist())]

    @torch.no_grad()
    def predict_batch(self, images, topk: int | None = 30,
                      threshold: float | None = None) -> list[list[tuple[str, float]]]:
        """Tag multiple images (processed individually for mixed resolutions)."""
        return [self.predict(img, topk=topk, threshold=threshold) for img in images]

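# Programmatic usage (a minimal sketch; file names are placeholders):
#
#     tagger = Tagger("tagger.safetensors", "tagger_vocab.json",
#                     device="cpu", dtype=torch.float32)
#     for tag, score in tagger.predict("photo.jpg", topk=10):
#         print(f"{score:.3f}  {tag}")
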
# =============================================================================
# Output formatters
# =============================================================================

def _fmt_pretty(path: str, results) -> str:
    lines = [f"\n{'─' * 60}", f" {path}", f"{'─' * 60}"]
    for rank, (tag, score) in enumerate(results, 1):
        bar = "█" * int(score * 20)
        lines.append(f" {rank:>3}. {score:.3f} {bar:<20} {tag}")
    return "\n".join(lines)


def _fmt_tags(results) -> str:
    return ", ".join(tag for tag, _ in results)


def _fmt_json(path: str, results) -> dict:
    return {"file": path, "tags": [{"tag": t, "score": round(s, 4)} for t, s in results]}

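# For reference, --format json prints a list shaped like (values illustrative):
#     [{"file": "photo.jpg", "tags": [{"tag": "landscape", "score": 0.9731}, ...]}]
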
# =============================================================================
# CLI
# =============================================================================

def main():
    parser = argparse.ArgumentParser(
        description="DINOv3 ViT-H/16+ tagger inference (standalone, no transformers dep)",
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser.add_argument("--checkpoint", required=True, help="Path to .safetensors or .pth checkpoint")
    parser.add_argument("--vocab", required=True, help="Path to tagger_vocab.json")
    parser.add_argument("--images", nargs="+", required=True, help="Image paths and/or http(s) URLs")
    parser.add_argument("--device", default="cuda", help="Device: cuda, cuda:0, cpu, … (default: cuda)")
    parser.add_argument("--max-size", type=int, default=1024,
                        help="Long-edge cap in pixels, multiple of 16 (default: 1024)")

    mode = parser.add_mutually_exclusive_group()
    mode.add_argument("--topk", type=int, default=30, help="Return top-k tags (default: 30)")
    mode.add_argument("--threshold", type=float, help="Return all tags with score >= threshold")

    parser.add_argument("--format", choices=["pretty", "tags", "json"],
                        default="pretty", help="Output format (default: pretty)")
    args = parser.parse_args()

    tagger = Tagger(checkpoint_path=args.checkpoint, vocab_path=args.vocab,
                    device=args.device, max_size=args.max_size)

    # "is not None" so an explicit --threshold 0.0 still selects threshold mode
    topk, threshold = (None, args.threshold) if args.threshold is not None else (args.topk, None)
    json_out = []

    for src in args.images:
        is_url = str(src).startswith("http://") or str(src).startswith("https://")
        if not is_url and not Path(src).exists():
            print(f"[warning] File not found: {src}", file=sys.stderr)
            continue
        results = tagger.predict(src, topk=topk, threshold=threshold)
        if args.format == "pretty":
            print(_fmt_pretty(src, results))
        elif args.format == "tags":
            print(_fmt_tags(results))
        elif args.format == "json":
            json_out.append(_fmt_json(src, results))

    if args.format == "json":
        print(json.dumps(json_out, indent=2, ensure_ascii=False))


if __name__ == "__main__":
    main()