hysts HF Staff committed on
Commit
28bfd14
·
0 Parent(s):

Initial commit

Browse files
Files changed (10) hide show
  1. .gitattributes +36 -0
  2. .gitignore +162 -0
  3. .python-version +1 -0
  4. README.md +13 -0
  5. app.py +304 -0
  6. pyproject.toml +64 -0
  7. requirements.txt +301 -0
  8. style.css +4 -0
  9. transformers-5.5.0.dev0-py3-none-any.whl +3 -0
  10. uv.lock +0 -0
.gitattributes ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ *.whl filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,162 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ .gradio/
2
+
3
+ # Byte-compiled / optimized / DLL files
4
+ __pycache__/
5
+ *.py[cod]
6
+ *$py.class
7
+
8
+ # C extensions
9
+ *.so
10
+
11
+ # Distribution / packaging
12
+ .Python
13
+ build/
14
+ develop-eggs/
15
+ dist/
16
+ downloads/
17
+ eggs/
18
+ .eggs/
19
+ lib/
20
+ lib64/
21
+ parts/
22
+ sdist/
23
+ var/
24
+ wheels/
25
+ share/python-wheels/
26
+ *.egg-info/
27
+ .installed.cfg
28
+ *.egg
29
+ MANIFEST
30
+
31
+ # PyInstaller
32
+ # Usually these files are written by a python script from a template
33
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
34
+ *.manifest
35
+ *.spec
36
+
37
+ # Installer logs
38
+ pip-log.txt
39
+ pip-delete-this-directory.txt
40
+
41
+ # Unit test / coverage reports
42
+ htmlcov/
43
+ .tox/
44
+ .nox/
45
+ .coverage
46
+ .coverage.*
47
+ .cache
48
+ nosetests.xml
49
+ coverage.xml
50
+ *.cover
51
+ *.py,cover
52
+ .hypothesis/
53
+ .pytest_cache/
54
+ cover/
55
+
56
+ # Translations
57
+ *.mo
58
+ *.pot
59
+
60
+ # Django stuff:
61
+ *.log
62
+ local_settings.py
63
+ db.sqlite3
64
+ db.sqlite3-journal
65
+
66
+ # Flask stuff:
67
+ instance/
68
+ .webassets-cache
69
+
70
+ # Scrapy stuff:
71
+ .scrapy
72
+
73
+ # Sphinx documentation
74
+ docs/_build/
75
+
76
+ # PyBuilder
77
+ .pybuilder/
78
+ target/
79
+
80
+ # Jupyter Notebook
81
+ .ipynb_checkpoints
82
+
83
+ # IPython
84
+ profile_default/
85
+ ipython_config.py
86
+
87
+ # pyenv
88
+ # For a library or package, you might want to ignore these files since the code is
89
+ # intended to run in multiple environments; otherwise, check them in:
90
+ # .python-version
91
+
92
+ # pipenv
93
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
94
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
95
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
96
+ # install all needed dependencies.
97
+ #Pipfile.lock
98
+
99
+ # poetry
100
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
101
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
102
+ # commonly ignored for libraries.
103
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
104
+ #poetry.lock
105
+
106
+ # pdm
107
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
108
+ #pdm.lock
109
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
110
+ # in version control.
111
+ # https://pdm.fming.dev/#use-with-ide
112
+ .pdm.toml
113
+
114
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
115
+ __pypackages__/
116
+
117
+ # Celery stuff
118
+ celerybeat-schedule
119
+ celerybeat.pid
120
+
121
+ # SageMath parsed files
122
+ *.sage.py
123
+
124
+ # Environments
125
+ .env
126
+ .venv
127
+ env/
128
+ venv/
129
+ ENV/
130
+ env.bak/
131
+ venv.bak/
132
+
133
+ # Spyder project settings
134
+ .spyderproject
135
+ .spyproject
136
+
137
+ # Rope project settings
138
+ .ropeproject
139
+
140
+ # mkdocs documentation
141
+ /site
142
+
143
+ # mypy
144
+ .mypy_cache/
145
+ .dmypy.json
146
+ dmypy.json
147
+
148
+ # Pyre type checker
149
+ .pyre/
150
+
151
+ # pytype static type analyzer
152
+ .pytype/
153
+
154
+ # Cython debug symbols
155
+ cython_debug/
156
+
157
+ # PyCharm
158
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
159
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
160
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
161
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
162
+ #.idea/
.python-version ADDED
@@ -0,0 +1 @@
 
 
1
+ 3.12
README.md ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: Gemma 4 31B It
3
+ emoji: 🚀
4
+ colorFrom: blue
5
+ colorTo: green
6
+ sdk: gradio
7
+ sdk_version: 6.10.0
8
+ python_version: "3.12.12"
9
+ app_file: app.py
10
+ pinned: false
11
+ ---
12
+
13
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,304 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import subprocess
3
+ import sys
4
+ from collections.abc import Iterator
5
+ from pathlib import Path
6
+ from threading import Thread
7
+
8
+ # Install pre-built transformers wheel (excluded from requirements.txt
9
+ # because HF Spaces cannot pip-install local wheel paths).
10
+ if os.getenv("SPACE_ID"):
11
+ _whl = Path(__file__).parent / "transformers-5.5.0.dev0-py3-none-any.whl"
12
+ subprocess.check_call([sys.executable, "-m", "pip", "install", str(_whl)]) # noqa: S603 - trusted, hardcoded args
13
+
14
+ import gradio as gr
15
+ import spaces
16
+ import torch
17
+ from transformers import AutoModelForMultimodalLM, AutoProcessor, BatchFeature
18
+ from transformers.generation.streamers import TextIteratorStreamer
19
+
20
+ MODEL_ID = "gg-hf-gg/gemma-4-31b-it"
21
+
22
+ processor = AutoProcessor.from_pretrained(MODEL_ID, use_fast=False)
23
+ model = AutoModelForMultimodalLM.from_pretrained(MODEL_ID, device_map="auto", dtype=torch.bfloat16)
24
+
25
+ IMAGE_FILE_TYPES = (".jpg", ".jpeg", ".png", ".webp")
26
+ VIDEO_FILE_TYPES = (".mp4", ".mov", ".avi", ".webm")
27
+ MAX_INPUT_TOKENS = int(os.getenv("MAX_INPUT_TOKENS", "10_000"))
28
+
29
+ THINKING_START = "<|channel>"
30
+ THINKING_END = "<channel|>"
31
+
32
+ # Special tokens to strip from decoded output (keeping thinking delimiters
33
+ # so that Gradio's reasoning_tags can find them on the frontend).
34
+ _KEEP_TOKENS = {THINKING_START, THINKING_END}
35
+ _STRIP_TOKENS = sorted(
36
+ (t for t in processor.tokenizer.all_special_tokens if t not in _KEEP_TOKENS),
37
+ key=len,
38
+ reverse=True, # longest first to avoid partial matches
39
+ )
40
+
41
+
42
def _strip_special_tokens(text: str) -> str:
    """Remove tokenizer special tokens from ``text``, keeping the thinking delimiters.

    ``_STRIP_TOKENS`` is sorted longest-first at module level, so longer tokens
    are removed before any shorter token that could be a substring of them.
    """
    cleaned = text
    for special in _STRIP_TOKENS:
        cleaned = cleaned.replace(special, "")
    return cleaned
46
+
47
+
48
def _classify_file(path: str) -> str | None:
    """Classify a file path by extension.

    Returns:
        ``"image"`` or ``"video"`` for supported extensions (case-insensitive),
        otherwise ``None``.
    """
    suffix = path.lower()
    if suffix.endswith(IMAGE_FILE_TYPES):
        return "image"
    return "video" if suffix.endswith(VIDEO_FILE_TYPES) else None
56
+
57
+
58
def process_new_user_message(message: dict) -> list[dict]:
    """Convert a Gradio multimodal payload into chat-template content entries.

    Supported media files come first (as ``{"type": kind, "url": path}``),
    followed by a single text entry (possibly empty). Unsupported files are
    silently dropped.
    """
    entries: list[dict] = []
    for filepath in message.get("files", []):
        media_kind = _classify_file(filepath)
        if media_kind:
            entries.append({"type": media_kind, "url": filepath})
    entries.append({"type": "text", "text": message.get("text", "")})
    return entries
67
+
68
+
69
def process_history(history: list[dict]) -> list[dict]:
    """Walk Gradio 6 history and build message list with URL-based media references.

    Assistant turns are collapsed to a single space-joined text entry; any
    non-assistant turn is treated as a user turn and its text/file parts are
    converted (unsupported files dropped, empty turns skipped).
    """
    messages: list[dict] = []

    for turn in history:
        if turn["role"] == "assistant":
            joined = " ".join(part["text"] for part in turn["content"] if part.get("type") == "text")
            messages.append({"role": "assistant", "content": [{"type": "text", "text": joined}]})
            continue

        content: list[dict] = []
        for part in turn["content"]:
            part_type = part.get("type")
            if part_type == "text":
                content.append({"type": "text", "text": part["text"]})
            elif part_type == "file":
                media_path = part["file"]["path"]
                media_kind = _classify_file(media_path)
                if media_kind:
                    content.append({"type": media_kind, "url": media_path})
        if content:
            messages.append({"role": "user", "content": content})

    return messages
96
+
97
+
98
@spaces.GPU(duration=180)
@torch.inference_mode()
def _generate_on_gpu(inputs: BatchFeature, max_new_tokens: int, thinking: bool) -> Iterator[str]:
    """Run ``model.generate`` on the GPU worker, yielding the decoded text so far.

    Generation runs in a background thread feeding a streaming decoder; each
    iteration yields the full accumulated text. When ``thinking`` is enabled the
    streamer keeps special tokens (so the thinking delimiters survive for the
    frontend) and the remaining special tokens are stripped manually.

    Raises:
        gr.Error: if the generation thread raised any exception.
    """
    inputs = inputs.to(device=model.device, dtype=torch.bfloat16)

    streamer = TextIteratorStreamer(
        processor,
        timeout=30.0,
        skip_prompt=True,
        skip_special_tokens=not thinking,
    )
    generation_args = dict(inputs)
    generation_args.update(
        streamer=streamer,
        max_new_tokens=max_new_tokens,
        disable_compile=True,
    )

    # Exceptions from the worker thread are collected here and re-raised below.
    errors: list[Exception] = []

    def _run_generation() -> None:
        try:
            model.generate(**generation_args)
        except Exception as e:  # noqa: BLE001
            errors.append(e)

    worker = Thread(target=_run_generation)
    worker.start()

    pieces: list[str] = []
    for chunk in streamer:
        pieces.append(chunk)
        so_far = "".join(pieces)
        yield _strip_special_tokens(so_far) if thinking else so_far

    worker.join()
    if errors:
        msg = f"Generation failed: {errors[0]}"
        raise gr.Error(msg)
140
+
141
+
142
def validate_input(message: dict) -> dict:
    """Validate the multimodal textbox payload before generation starts.

    Rejects empty submissions, mixed media types, and multiple videos.
    """
    text_present = bool(message.get("text", "").strip())
    files = message.get("files", [])
    files_present = bool(files)

    if not (text_present or files_present):
        return gr.validate(text_present, "Please enter a message or upload a file.")

    media_kinds = [k for k in (_classify_file(f) for f in files) if k is not None]

    # NOTE(review): these branches pass `text_present` as the validity flag, so a
    # submission that also contains text would still validate despite the error
    # message — confirm this matches the intended gr.validate semantics.
    if len(set(media_kinds)) > 1:
        return gr.validate(text_present, "Please upload only one type of media (images or video) at a time.")
    if media_kinds.count("video") > 1:
        return gr.validate(text_present, "Only one video file can be uploaded at a time.")

    return gr.validate(text_present or files_present, "")
159
+
160
+
161
+ def _has_media_type(messages: list[dict], media_type: str) -> bool:
162
+ """Check if any message contains a content entry of the given media type."""
163
+ return any(
164
+ c.get("type") == media_type for m in messages for c in (m["content"] if isinstance(m["content"], list) else [])
165
+ )
166
+
167
+
168
def generate(
    message: dict,
    history: list[dict],
    thinking: bool = False,
    max_new_tokens: int = 1024,
    system_prompt: str = "",
) -> Iterator[str]:
    """Assemble the chat messages, tokenize them, and stream the model's reply.

    Raises:
        gr.Error: if the tokenized prompt exceeds MAX_INPUT_TOKENS.
    """
    chat: list[dict] = []
    if system_prompt:
        chat.append({"role": "system", "content": [{"type": "text", "text": system_prompt}]})
    chat.extend(process_history(history))
    chat.append({"role": "user", "content": process_new_user_message(message)})

    template_kwargs: dict = dict(
        tokenize=True,
        return_dict=True,
        return_tensors="pt",
        add_generation_prompt=True,
    )
    if _has_media_type(chat, "video"):
        # Only set for video inputs; skips decoding the audio track.
        template_kwargs["load_audio_from_video"] = False
    if thinking:
        template_kwargs["enable_thinking"] = True

    inputs = processor.apply_chat_template(chat, **template_kwargs)

    n_tokens = inputs["input_ids"].shape[1]
    if n_tokens > MAX_INPUT_TOKENS:
        msg = f"Input too long ({n_tokens} tokens). Maximum is {MAX_INPUT_TOKENS} tokens."
        raise gr.Error(msg)

    yield from _generate_on_gpu(inputs=inputs, max_new_tokens=max_new_tokens, thinking=thinking)
202
+
203
+
204
# Clickable examples shown under the input box: text-only, single-image,
# multi-image, and video prompts.
examples = [
    # --- Text-only examples ---
    [{"text": "What is the capital of France?", "files": []}],
    [{"text": "What is the water formula?", "files": []}],
    [{"text": "Explain quantum entanglement in simple terms.", "files": []}],
    [{"text": "I want to do a car wash that is 50 meters away, should I walk or drive?", "files": []}],
    [
        {
            "text": "Write a poem about beer with 4 stanzas. Format the title as an H2 markdown heading and bold the first line of each stanza.",
            "files": [],
        }
    ],
    # --- Single-image examples ---
    [
        {
            "text": "Describe this image.",
            "files": ["https://news.bbc.co.uk/media/images/38107000/jpg/_38107299_ronaldogoal_ap_300.jpg"],
        }
    ],
    [
        {
            "text": "What is the city in this image? Describe what you see.",
            "files": ["https://imgmd.net/images/v1/guia/1698673/rio-de-janeiro-4-c.jpg"],
        }
    ],
    # --- Multi-image examples ---
    [
        {
            "text": "What are the key similarities between these three images?",
            "files": [
                "https://news.bbc.co.uk/media/images/38107000/jpg/_38107299_ronaldogoal_ap_300.jpg",
                "https://ogimg.infoglobo.com.br/in/12547538-502-0e0/FT1086A/94-8705-14.jpg",
                "https://amazonasatual.com.br/wp-content/uploads/2021/01/Pele.jpg",
            ],
        }
    ],
    # --- Video examples ---
    [
        {
            "text": "What is happening in this video?",
            "files": ["https://huggingface.co/datasets/merve/vlm_test_images/resolve/main/concert.mp4"],
        }
    ],
]

demo = gr.ChatInterface(
    fn=generate,
    validator=validate_input,
    chatbot=gr.Chatbot(
        scale=1,
        # Delimiter pairs handed to the LaTeX renderer.
        latex_delimiters=[
            {"left": "$$", "right": "$$", "display": True},
            {"left": "$", "right": "$", "display": False},
            {"left": "\\(", "right": "\\)", "display": False},
            {"left": "\\[", "right": "\\]", "display": True},
        ],
        # Matches the delimiters kept by _strip_special_tokens so the frontend
        # can fold the thinking section.
        reasoning_tags=[(THINKING_START, THINKING_END)],
    ),
    textbox=gr.MultimodalTextbox(
        sources=["upload"],
        file_types=[*IMAGE_FILE_TYPES, *VIDEO_FILE_TYPES],
        file_count="multiple",
        autofocus=True,
    ),
    multimodal=True,
    # Order must match generate()'s (thinking, max_new_tokens, system_prompt).
    additional_inputs=[
        gr.Checkbox(label="Thinking", value=False),
        gr.Slider(label="Max New Tokens", minimum=100, maximum=4000, step=10, value=1024),
        gr.Textbox(label="System Prompt", value=""),
    ],
    additional_inputs_accordion=gr.Accordion("Settings", open=True),
    stop_btn=False,
    title="Gemma 4 31B It",
    examples=examples,
    run_examples_on_click=False,
    cache_examples=False,
    delete_cache=(1800, 1800),
)

if __name__ == "__main__":
    demo.launch(css_paths="style.css", max_file_size="20mb")
pyproject.toml ADDED
@@ -0,0 +1,64 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [project]
2
+ name = "gemma-4-31b-it"
3
+ version = "0.1.0"
4
+ description = "Gradio demo Space for the Gemma 4 31B IT multimodal chat model"
5
+ readme = "README.md"
6
+ requires-python = ">=3.12"
7
+ dependencies = [
8
+ "accelerate>=1.13.0",
9
+ "gradio>=6.9.0",
10
+ "spaces>=0.47.0",
11
+ "torch==2.9.1",
12
+ "torchcodec>=0.9.0,<0.10",
13
+ "torchvision>=0.24.1",
14
+ ]
15
+
16
+ [tool.ruff]
17
+ line-length = 119
18
+
19
+ [tool.ruff.lint]
20
+ select = ["ALL"]
21
+ ignore = [
22
+ "COM812", # missing-trailing-comma
23
+ "D203", # one-blank-line-before-class
24
+ "D213", # multi-line-summary-second-line
25
+ "E501", # line-too-long
26
+ "SIM117", # multiple-with-statements
27
+ #
28
+ "D100", # undocumented-public-module
29
+ "D101", # undocumented-public-class
30
+ "D102", # undocumented-public-method
31
+ "D103", # undocumented-public-function
32
+ "D104", # undocumented-public-package
33
+ "D105", # undocumented-magic-method
34
+ "D107", # undocumented-public-init
35
+ "EM101", # raw-string-in-exception
36
+ "FBT001", # boolean-type-hint-positional-argument
37
+ "FBT002", # boolean-default-value-positional-argument
38
+ "ISC001", # single-line-implicit-string-concatenation
39
+ "PGH003", # blanket-type-ignore
40
+ "PLR0913", # too-many-arguments
41
+ "PLR0915", # too-many-statements
42
+ "TRY003", # raise-vanilla-args
43
+ ]
44
+ unfixable = [
45
+ "F401", # unused-import
46
+ ]
47
+
48
+ [tool.ruff.lint.pydocstyle]
49
+ convention = "google"
50
+
51
+ [tool.ruff.format]
52
+ docstring-code-format = true
53
+
54
+ [tool.uv.sources]
55
+ transformers = { path = "transformers-5.5.0.dev0-py3-none-any.whl" }
56
+
57
+ [dependency-groups]
58
+ dev = [
59
+ "ruff>=0.15.6",
60
+ "transformers",
61
+ ]
62
+ hf-spaces = [
63
+ "datasets>=4.7.0",
64
+ ]
requirements.txt ADDED
@@ -0,0 +1,301 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # This file was autogenerated by uv via the following command:
2
+ # uv export --no-hashes --no-dev --group hf-spaces --no-emit-package typer-slim --no-emit-package spaces -o requirements.txt
3
+ accelerate==1.13.0
4
+ # via gemma-4-31b-it
5
+ aiofiles==24.1.0
6
+ # via gradio
7
+ aiohappyeyeballs==2.6.1
8
+ # via aiohttp
9
+ aiohttp==3.13.5
10
+ # via fsspec
11
+ aiosignal==1.4.0
12
+ # via aiohttp
13
+ annotated-doc==0.0.4
14
+ # via
15
+ # fastapi
16
+ # typer
17
+ annotated-types==0.7.0
18
+ # via pydantic
19
+ anyio==4.13.0
20
+ # via
21
+ # gradio
22
+ # httpx
23
+ # starlette
24
+ attrs==26.1.0
25
+ # via aiohttp
26
+ audioop-lts==0.2.2 ; python_full_version >= '3.13'
27
+ # via gradio
28
+ brotli==1.2.0
29
+ # via gradio
30
+ certifi==2026.2.25
31
+ # via
32
+ # httpcore
33
+ # httpx
34
+ # requests
35
+ charset-normalizer==3.4.6
36
+ # via requests
37
+ click==8.3.1
38
+ # via
39
+ # typer
40
+ # uvicorn
41
+ colorama==0.4.6 ; sys_platform == 'win32'
42
+ # via
43
+ # click
44
+ # tqdm
45
+ datasets==4.8.4
46
+ dill==0.4.1
47
+ # via
48
+ # datasets
49
+ # multiprocess
50
+ fastapi==0.135.3
51
+ # via gradio
52
+ ffmpy==1.0.0
53
+ # via gradio
54
+ filelock==3.25.2
55
+ # via
56
+ # datasets
57
+ # huggingface-hub
58
+ # torch
59
+ frozenlist==1.8.0
60
+ # via
61
+ # aiohttp
62
+ # aiosignal
63
+ fsspec==2026.2.0
64
+ # via
65
+ # datasets
66
+ # gradio-client
67
+ # huggingface-hub
68
+ # torch
69
+ gradio==6.10.0
70
+ # via
71
+ # gemma-4-31b-it
72
+ # spaces
73
+ gradio-client==2.4.0
74
+ # via
75
+ # gradio
76
+ # hf-gradio
77
+ groovy==0.1.2
78
+ # via gradio
79
+ h11==0.16.0
80
+ # via
81
+ # httpcore
82
+ # uvicorn
83
+ hf-gradio==0.3.0
84
+ # via gradio
85
+ hf-xet==1.4.3 ; platform_machine == 'AMD64' or platform_machine == 'aarch64' or platform_machine == 'amd64' or platform_machine == 'arm64' or platform_machine == 'x86_64'
86
+ # via huggingface-hub
87
+ httpcore==1.0.9
88
+ # via httpx
89
+ httpx==0.28.1
90
+ # via
91
+ # datasets
92
+ # gradio
93
+ # gradio-client
94
+ # huggingface-hub
95
+ # safehttpx
96
+ # spaces
97
+ huggingface-hub==1.8.0
98
+ # via
99
+ # accelerate
100
+ # datasets
101
+ # gradio
102
+ # gradio-client
103
+ idna==3.11
104
+ # via
105
+ # anyio
106
+ # httpx
107
+ # requests
108
+ # yarl
109
+ jinja2==3.1.6
110
+ # via
111
+ # gradio
112
+ # torch
113
+ markdown-it-py==4.0.0
114
+ # via rich
115
+ markupsafe==3.0.3
116
+ # via
117
+ # gradio
118
+ # jinja2
119
+ mdurl==0.1.2
120
+ # via markdown-it-py
121
+ mpmath==1.3.0
122
+ # via sympy
123
+ multidict==6.7.1
124
+ # via
125
+ # aiohttp
126
+ # yarl
127
+ multiprocess==0.70.19
128
+ # via datasets
129
+ networkx==3.6.1
130
+ # via torch
131
+ numpy==2.4.4
132
+ # via
133
+ # accelerate
134
+ # datasets
135
+ # gradio
136
+ # pandas
137
+ # torchvision
138
+ nvidia-cublas-cu12==12.8.4.1 ; platform_machine == 'x86_64' and sys_platform == 'linux'
139
+ # via
140
+ # nvidia-cudnn-cu12
141
+ # nvidia-cusolver-cu12
142
+ # torch
143
+ nvidia-cuda-cupti-cu12==12.8.90 ; platform_machine == 'x86_64' and sys_platform == 'linux'
144
+ # via torch
145
+ nvidia-cuda-nvrtc-cu12==12.8.93 ; platform_machine == 'x86_64' and sys_platform == 'linux'
146
+ # via torch
147
+ nvidia-cuda-runtime-cu12==12.8.90 ; platform_machine == 'x86_64' and sys_platform == 'linux'
148
+ # via torch
149
+ nvidia-cudnn-cu12==9.10.2.21 ; platform_machine == 'x86_64' and sys_platform == 'linux'
150
+ # via torch
151
+ nvidia-cufft-cu12==11.3.3.83 ; platform_machine == 'x86_64' and sys_platform == 'linux'
152
+ # via torch
153
+ nvidia-cufile-cu12==1.13.1.3 ; platform_machine == 'x86_64' and sys_platform == 'linux'
154
+ # via torch
155
+ nvidia-curand-cu12==10.3.9.90 ; platform_machine == 'x86_64' and sys_platform == 'linux'
156
+ # via torch
157
+ nvidia-cusolver-cu12==11.7.3.90 ; platform_machine == 'x86_64' and sys_platform == 'linux'
158
+ # via torch
159
+ nvidia-cusparse-cu12==12.5.8.93 ; platform_machine == 'x86_64' and sys_platform == 'linux'
160
+ # via
161
+ # nvidia-cusolver-cu12
162
+ # torch
163
+ nvidia-cusparselt-cu12==0.7.1 ; platform_machine == 'x86_64' and sys_platform == 'linux'
164
+ # via torch
165
+ nvidia-nccl-cu12==2.27.5 ; platform_machine == 'x86_64' and sys_platform == 'linux'
166
+ # via torch
167
+ nvidia-nvjitlink-cu12==12.8.93 ; platform_machine == 'x86_64' and sys_platform == 'linux'
168
+ # via
169
+ # nvidia-cufft-cu12
170
+ # nvidia-cusolver-cu12
171
+ # nvidia-cusparse-cu12
172
+ # torch
173
+ nvidia-nvshmem-cu12==3.3.20 ; platform_machine == 'x86_64' and sys_platform == 'linux'
174
+ # via torch
175
+ nvidia-nvtx-cu12==12.8.90 ; platform_machine == 'x86_64' and sys_platform == 'linux'
176
+ # via torch
177
+ orjson==3.11.8
178
+ # via gradio
179
+ packaging==26.0
180
+ # via
181
+ # accelerate
182
+ # datasets
183
+ # gradio
184
+ # gradio-client
185
+ # huggingface-hub
186
+ # spaces
187
+ pandas==3.0.2
188
+ # via
189
+ # datasets
190
+ # gradio
191
+ pillow==12.2.0
192
+ # via
193
+ # gradio
194
+ # torchvision
195
+ propcache==0.4.1
196
+ # via
197
+ # aiohttp
198
+ # yarl
199
+ psutil==5.9.8
200
+ # via
201
+ # accelerate
202
+ # spaces
203
+ pyarrow==23.0.1
204
+ # via datasets
205
+ pydantic==2.12.5
206
+ # via
207
+ # fastapi
208
+ # gradio
209
+ # spaces
210
+ pydantic-core==2.41.5
211
+ # via pydantic
212
+ pydub==0.25.1
213
+ # via gradio
214
+ pygments==2.20.0
215
+ # via rich
216
+ python-dateutil==2.9.0.post0
217
+ # via pandas
218
+ python-multipart==0.0.22
219
+ # via gradio
220
+ pytz==2026.1.post1
221
+ # via gradio
222
+ pyyaml==6.0.3
223
+ # via
224
+ # accelerate
225
+ # datasets
226
+ # gradio
227
+ # huggingface-hub
228
+ requests==2.33.1
229
+ # via
230
+ # datasets
231
+ # spaces
232
+ rich==14.3.3
233
+ # via typer
234
+ safehttpx==0.1.7
235
+ # via gradio
236
+ safetensors==0.7.0
237
+ # via accelerate
238
+ semantic-version==2.10.0
239
+ # via gradio
240
+ setuptools==82.0.1
241
+ # via torch
242
+ shellingham==1.5.4
243
+ # via typer
244
+ six==1.17.0
245
+ # via python-dateutil
246
+ starlette==0.52.1
247
+ # via
248
+ # fastapi
249
+ # gradio
250
+ sympy==1.14.0
251
+ # via torch
252
+ tomlkit==0.13.3
253
+ # via gradio
254
+ torch==2.9.1
255
+ # via
256
+ # accelerate
257
+ # gemma-4-31b-it
258
+ # torchvision
259
+ torchcodec==0.9.1
260
+ # via gemma-4-31b-it
261
+ torchvision==0.24.1
262
+ # via gemma-4-31b-it
263
+ tqdm==4.67.3
264
+ # via
265
+ # datasets
266
+ # huggingface-hub
267
+ triton==3.5.1 ; platform_machine == 'x86_64' and sys_platform == 'linux'
268
+ # via torch
269
+ typer==0.24.1
270
+ # via
271
+ # gradio
272
+ # hf-gradio
273
+ # huggingface-hub
274
+ typing-extensions==4.15.0
275
+ # via
276
+ # aiosignal
277
+ # anyio
278
+ # fastapi
279
+ # gradio
280
+ # gradio-client
281
+ # huggingface-hub
282
+ # pydantic
283
+ # pydantic-core
284
+ # spaces
285
+ # starlette
286
+ # torch
287
+ # typing-inspection
288
+ typing-inspection==0.4.2
289
+ # via
290
+ # fastapi
291
+ # pydantic
292
+ tzdata==2025.3 ; sys_platform == 'emscripten' or sys_platform == 'win32'
293
+ # via pandas
294
+ urllib3==2.6.3
295
+ # via requests
296
+ uvicorn==0.42.0
297
+ # via gradio
298
+ xxhash==3.6.0
299
+ # via datasets
300
+ yarl==1.23.0
301
+ # via aiohttp
style.css ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
/* Center top-level h1 headings (presumably the Space title — confirm). */
h1 {
  display: block;
  text-align: center;
}
transformers-5.5.0.dev0-py3-none-any.whl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3dec12c61168baf629d9dbd43adac7232dfc1b72c23601f53472c1f0647678be
3
+ size 11329463
uv.lock ADDED
The diff for this file is too large to render. See raw diff