Spaces:
Running on Zero
Running on Zero
Deploybot commited on
Commit ·
49574d5
0
Parent(s):
Deploy from stable branch
Browse files- .gitattributes +35 -0
- .github/pull_request_template.md +17 -0
- .gitignore +173 -0
- .gitlint +143 -0
- .pre-commit-config.yaml +51 -0
- DEVELOPMENT.md +176 -0
- README.md +11 -0
- poetry.lock +0 -0
- pyproject.toml +66 -0
- requirements.txt +14 -0
- src/app.css +8 -0
- src/app.py +550 -0
- src/app_head.html +12 -0
- src/crops.py +71 -0
- src/document_parser.py +146 -0
- src/infer_chart2csv.py +106 -0
- src/infer_vision_qa.py +155 -0
- src/model_loader.py +87 -0
- src/pdf_io.py +57 -0
- src/themes/carbon.py +147 -0
- src/themes/research_monochrome.py +152 -0
- src/ui_state.py +43 -0
.gitattributes
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
*.7z filter=lfs diff=lfs merge=lfs -text
|
| 2 |
+
*.arrow filter=lfs diff=lfs merge=lfs -text
|
| 3 |
+
*.bin filter=lfs diff=lfs merge=lfs -text
|
| 4 |
+
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
| 5 |
+
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
| 6 |
+
*.ftz filter=lfs diff=lfs merge=lfs -text
|
| 7 |
+
*.gz filter=lfs diff=lfs merge=lfs -text
|
| 8 |
+
*.h5 filter=lfs diff=lfs merge=lfs -text
|
| 9 |
+
*.joblib filter=lfs diff=lfs merge=lfs -text
|
| 10 |
+
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
| 11 |
+
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
| 12 |
+
*.model filter=lfs diff=lfs merge=lfs -text
|
| 13 |
+
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
| 14 |
+
*.npy filter=lfs diff=lfs merge=lfs -text
|
| 15 |
+
*.npz filter=lfs diff=lfs merge=lfs -text
|
| 16 |
+
*.onnx filter=lfs diff=lfs merge=lfs -text
|
| 17 |
+
*.ot filter=lfs diff=lfs merge=lfs -text
|
| 18 |
+
*.parquet filter=lfs diff=lfs merge=lfs -text
|
| 19 |
+
*.pb filter=lfs diff=lfs merge=lfs -text
|
| 20 |
+
*.pickle filter=lfs diff=lfs merge=lfs -text
|
| 21 |
+
*.pkl filter=lfs diff=lfs merge=lfs -text
|
| 22 |
+
*.pt filter=lfs diff=lfs merge=lfs -text
|
| 23 |
+
*.pth filter=lfs diff=lfs merge=lfs -text
|
| 24 |
+
*.rar filter=lfs diff=lfs merge=lfs -text
|
| 25 |
+
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
| 26 |
+
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
| 27 |
+
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
| 28 |
+
*.tar filter=lfs diff=lfs merge=lfs -text
|
| 29 |
+
*.tflite filter=lfs diff=lfs merge=lfs -text
|
| 30 |
+
*.tgz filter=lfs diff=lfs merge=lfs -text
|
| 31 |
+
*.wasm filter=lfs diff=lfs merge=lfs -text
|
| 32 |
+
*.xz filter=lfs diff=lfs merge=lfs -text
|
| 33 |
+
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
+
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
+
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
.github/pull_request_template.md
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<!--
|
| 2 |
+
Thank you for your pull request. Please review and complete the sections below.
|
| 3 |
+
-->
|
| 4 |
+
|
| 5 |
+
### Description
|
| 6 |
+
|
| 7 |
+
<!-- Provide a description of the change or tick the promote to stable box -->
|
| 8 |
+
|
| 9 |
+
- [ ] Promote QA to Stable
|
| 10 |
+
|
| 11 |
+
### Checklist
|
| 12 |
+
|
| 13 |
+
<!-- For completed items, change [ ] to [x]. -->
|
| 14 |
+
|
| 15 |
+
- [ ] I have installed pre-commit: `pre-commit install`
|
| 16 |
+
- [ ] All pre-commit checks pass: `pre-commit run`
|
| 17 |
+
- [ ] Dependencies are [compatible with ZeroGPU](https://huggingface.co/docs/hub/en/spaces-zerogpu#supported-versions)
|
.gitignore
ADDED
|
@@ -0,0 +1,173 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Byte-compiled / optimized / DLL files
|
| 2 |
+
__pycache__/
|
| 3 |
+
*.py[cod]
|
| 4 |
+
*$py.class
|
| 5 |
+
|
| 6 |
+
# C extensions
|
| 7 |
+
*.so
|
| 8 |
+
|
| 9 |
+
# Distribution / packaging
|
| 10 |
+
.Python
|
| 11 |
+
build/
|
| 12 |
+
develop-eggs/
|
| 13 |
+
dist/
|
| 14 |
+
downloads/
|
| 15 |
+
eggs/
|
| 16 |
+
.eggs/
|
| 17 |
+
lib/
|
| 18 |
+
lib64/
|
| 19 |
+
parts/
|
| 20 |
+
sdist/
|
| 21 |
+
var/
|
| 22 |
+
wheels/
|
| 23 |
+
share/python-wheels/
|
| 24 |
+
*.egg-info/
|
| 25 |
+
.installed.cfg
|
| 26 |
+
*.egg
|
| 27 |
+
MANIFEST
|
| 28 |
+
|
| 29 |
+
# PyInstaller
|
| 30 |
+
# Usually these files are written by a python script from a template
|
| 31 |
+
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
| 32 |
+
*.manifest
|
| 33 |
+
*.spec
|
| 34 |
+
|
| 35 |
+
# Installer logs
|
| 36 |
+
pip-log.txt
|
| 37 |
+
pip-delete-this-directory.txt
|
| 38 |
+
|
| 39 |
+
# Unit test / coverage reports
|
| 40 |
+
htmlcov/
|
| 41 |
+
.tox/
|
| 42 |
+
.nox/
|
| 43 |
+
.coverage
|
| 44 |
+
.coverage.*
|
| 45 |
+
.cache
|
| 46 |
+
nosetests.xml
|
| 47 |
+
coverage.xml
|
| 48 |
+
*.cover
|
| 49 |
+
*.py,cover
|
| 50 |
+
.hypothesis/
|
| 51 |
+
.pytest_cache/
|
| 52 |
+
cover/
|
| 53 |
+
|
| 54 |
+
# Translations
|
| 55 |
+
*.mo
|
| 56 |
+
*.pot
|
| 57 |
+
|
| 58 |
+
# Django stuff:
|
| 59 |
+
*.log
|
| 60 |
+
local_settings.py
|
| 61 |
+
db.sqlite3
|
| 62 |
+
db.sqlite3-journal
|
| 63 |
+
|
| 64 |
+
# Flask stuff:
|
| 65 |
+
instance/
|
| 66 |
+
.webassets-cache
|
| 67 |
+
|
| 68 |
+
# Scrapy stuff:
|
| 69 |
+
.scrapy
|
| 70 |
+
|
| 71 |
+
# Sphinx documentation
|
| 72 |
+
docs/_build/
|
| 73 |
+
|
| 74 |
+
# PyBuilder
|
| 75 |
+
.pybuilder/
|
| 76 |
+
target/
|
| 77 |
+
|
| 78 |
+
# Jupyter Notebook
|
| 79 |
+
.ipynb_checkpoints
|
| 80 |
+
|
| 81 |
+
# IPython
|
| 82 |
+
profile_default/
|
| 83 |
+
ipython_config.py
|
| 84 |
+
|
| 85 |
+
# pyenv
|
| 86 |
+
# For a library or package, you might want to ignore these files since the code is
|
| 87 |
+
# intended to run in multiple environments; otherwise, check them in:
|
| 88 |
+
# .python-version
|
| 89 |
+
|
| 90 |
+
# pipenv
|
| 91 |
+
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
|
| 92 |
+
# However, in case of collaboration, if having platform-specific dependencies or dependencies
|
| 93 |
+
# having no cross-platform support, pipenv may install dependencies that don't work, or not
|
| 94 |
+
# install all needed dependencies.
|
| 95 |
+
#Pipfile.lock
|
| 96 |
+
|
| 97 |
+
# UV
|
| 98 |
+
# Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
|
| 99 |
+
# This is especially recommended for binary packages to ensure reproducibility, and is more
|
| 100 |
+
# commonly ignored for libraries.
|
| 101 |
+
#uv.lock
|
| 102 |
+
|
| 103 |
+
# poetry
|
| 104 |
+
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
|
| 105 |
+
# This is especially recommended for binary packages to ensure reproducibility, and is more
|
| 106 |
+
# commonly ignored for libraries.
|
| 107 |
+
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
|
| 108 |
+
#poetry.lock
|
| 109 |
+
|
| 110 |
+
# pdm
|
| 111 |
+
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
|
| 112 |
+
#pdm.lock
|
| 113 |
+
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
|
| 114 |
+
# in version control.
|
| 115 |
+
# https://pdm.fming.dev/latest/usage/project/#working-with-version-control
|
| 116 |
+
.pdm.toml
|
| 117 |
+
.pdm-python
|
| 118 |
+
.pdm-build/
|
| 119 |
+
|
| 120 |
+
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
|
| 121 |
+
__pypackages__/
|
| 122 |
+
|
| 123 |
+
# Celery stuff
|
| 124 |
+
celerybeat-schedule
|
| 125 |
+
celerybeat.pid
|
| 126 |
+
|
| 127 |
+
# SageMath parsed files
|
| 128 |
+
*.sage.py
|
| 129 |
+
|
| 130 |
+
# Environments
|
| 131 |
+
.env
|
| 132 |
+
.venv
|
| 133 |
+
env/
|
| 134 |
+
venv/
|
| 135 |
+
ENV/
|
| 136 |
+
env.bak/
|
| 137 |
+
venv.bak/
|
| 138 |
+
|
| 139 |
+
# Spyder project settings
|
| 140 |
+
.spyderproject
|
| 141 |
+
.spyproject
|
| 142 |
+
|
| 143 |
+
# Rope project settings
|
| 144 |
+
.ropeproject
|
| 145 |
+
|
| 146 |
+
# mkdocs documentation
|
| 147 |
+
/site
|
| 148 |
+
|
| 149 |
+
# mypy
|
| 150 |
+
.mypy_cache/
|
| 151 |
+
.dmypy.json
|
| 152 |
+
dmypy.json
|
| 153 |
+
|
| 154 |
+
# Pyre type checker
|
| 155 |
+
.pyre/
|
| 156 |
+
|
| 157 |
+
# pytype static type analyzer
|
| 158 |
+
.pytype/
|
| 159 |
+
|
| 160 |
+
# Cython debug symbols
|
| 161 |
+
cython_debug/
|
| 162 |
+
|
| 163 |
+
# PyCharm
|
| 164 |
+
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
|
| 165 |
+
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
|
| 166 |
+
# and can be added to the global gitignore or merged into this file. For a more nuclear
|
| 167 |
+
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
|
| 168 |
+
#.idea/
|
| 169 |
+
|
| 170 |
+
|
| 171 |
+
# Boilerplate specific ignores
|
| 172 |
+
.gradio/
|
| 173 |
+
.ruff_cache/
|
.gitlint
ADDED
|
@@ -0,0 +1,143 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Edit this file as you like.
|
| 2 |
+
#
|
| 3 |
+
# All these sections are optional. Each section with the exception of [general] represents
|
| 4 |
+
# one rule and each key in it is an option for that specific rule.
|
| 5 |
+
#
|
| 6 |
+
# Rules and sections can be referenced by their full name or by id. For example
|
| 7 |
+
# section "[body-max-line-length]" could also be written as "[B1]". Full section names are
|
| 8 |
+
# used in here for clarity.
|
| 9 |
+
#
|
| 10 |
+
# [general]
|
| 11 |
+
# Ignore certain rules, this example uses both full name and id
|
| 12 |
+
# ignore=title-trailing-punctuation, T3
|
| 13 |
+
|
| 14 |
+
[general]
|
| 15 |
+
# You HAVE to add the rule here to enable it, only configuring (such as below)
|
| 16 |
+
# does NOT enable it.
|
| 17 |
+
contrib=contrib-title-conventional-commits
|
| 18 |
+
|
| 19 |
+
[contrib-title-conventional-commits]
|
| 20 |
+
# Specify allowed commit types. For details see: https://www.conventionalcommits.org/
|
| 21 |
+
types = build,chore,ci,docs,feat,fix,perf,refactor,revert,style,test
|
| 22 |
+
|
| 23 |
+
# verbosity should be a value between 1 and 3, the commandline -v flags take precedence over this
|
| 24 |
+
# verbosity = 2
|
| 25 |
+
|
| 26 |
+
# By default gitlint will ignore merge, revert, fixup and squash commits.
|
| 27 |
+
# ignore-merge-commits=true
|
| 28 |
+
# ignore-revert-commits=true
|
| 29 |
+
# ignore-fixup-commits=true
|
| 30 |
+
# ignore-squash-commits=true
|
| 31 |
+
|
| 32 |
+
# Ignore any data send to gitlint via stdin
|
| 33 |
+
# ignore-stdin=true
|
| 34 |
+
|
| 35 |
+
# Fetch additional meta-data from the local repository when manually passing a
|
| 36 |
+
# commit message to gitlint via stdin or --commit-msg. Disabled by default.
|
| 37 |
+
# staged=true
|
| 38 |
+
|
| 39 |
+
# Hard fail when the target commit range is empty. Note that gitlint will
|
| 40 |
+
# already fail by default on invalid commit ranges. This option is specifically
|
| 41 |
+
# to tell gitlint to fail on *valid but empty* commit ranges.
|
| 42 |
+
# Disabled by default.
|
| 43 |
+
# fail-without-commits=true
|
| 44 |
+
|
| 45 |
+
# Enable debug mode (prints more output). Disabled by default.
|
| 46 |
+
# debug=true
|
| 47 |
+
|
| 48 |
+
# Enable community contributed rules
|
| 49 |
+
# See http://jorisroovers.github.io/gitlint/contrib_rules for details
|
| 50 |
+
# contrib=contrib-title-conventional-commits,CC1
|
| 51 |
+
|
| 52 |
+
# Set the extra-path where gitlint will search for user defined rules
|
| 53 |
+
# See http://jorisroovers.github.io/gitlint/user_defined_rules for details
|
| 54 |
+
# extra-path=examples/
|
| 55 |
+
|
| 56 |
+
# This is an example of how to configure the "title-max-length" rule and
|
| 57 |
+
# set the line-length it enforces to 50
|
| 58 |
+
# [title-max-length]
|
| 59 |
+
# line-length=50
|
| 60 |
+
|
| 61 |
+
# Conversely, you can also enforce minimal length of a title with the
|
| 62 |
+
# "title-min-length" rule:
|
| 63 |
+
# [title-min-length]
|
| 64 |
+
# min-length=5
|
| 65 |
+
|
| 66 |
+
# [title-must-not-contain-word]
|
| 67 |
+
# Comma-separated list of words that should not occur in the title. Matching is case
|
| 68 |
+
# insensitive. It's fine if the keyword occurs as part of a larger word (so "WIPING"
|
| 69 |
+
# will not cause a violation, but "WIP: my title" will.
|
| 70 |
+
# words=wip
|
| 71 |
+
|
| 72 |
+
# [title-match-regex]
|
| 73 |
+
# python-style regex that the commit-msg title must match
|
| 74 |
+
# Note that the regex can contradict with other rules if not used correctly
|
| 75 |
+
# (e.g. title-must-not-contain-word).
|
| 76 |
+
# regex=^US[0-9]*
|
| 77 |
+
|
| 78 |
+
# [body-max-line-length]
|
| 79 |
+
# line-length=72
|
| 80 |
+
|
| 81 |
+
# [body-min-length]
|
| 82 |
+
# min-length=5
|
| 83 |
+
|
| 84 |
+
# [body-is-missing]
|
| 85 |
+
# Whether to ignore this rule on merge commits (which typically only have a title)
|
| 86 |
+
# default = True
|
| 87 |
+
# ignore-merge-commits=false
|
| 88 |
+
|
| 89 |
+
# [body-changed-file-mention]
|
| 90 |
+
# List of files that need to be explicitly mentioned in the body when they are changed
|
| 91 |
+
# This is useful for when developers often erroneously edit certain files or git submodules.
|
| 92 |
+
# By specifying this rule, developers can only change the file when they explicitly reference
|
| 93 |
+
# it in the commit message.
|
| 94 |
+
# files=gitlint-core/gitlint/rules.py,README.md
|
| 95 |
+
|
| 96 |
+
# [body-match-regex]
|
| 97 |
+
# python-style regex that the commit-msg body must match.
|
| 98 |
+
# E.g. body must end in My-Commit-Tag: foo
|
| 99 |
+
# regex=My-Commit-Tag: foo$
|
| 100 |
+
|
| 101 |
+
# [author-valid-email]
|
| 102 |
+
# python-style regex that the commit author email address must match.
|
| 103 |
+
# For example, use the following regex if you only want to allow email addresses from foo.com
|
| 104 |
+
# regex=[^@]+@foo.com
|
| 105 |
+
|
| 106 |
+
# [ignore-by-title]
|
| 107 |
+
# Ignore certain rules for commits of which the title matches a regex
|
| 108 |
+
# E.g. Match commit titles that start with "Release"
|
| 109 |
+
# regex=^Release(.*)
|
| 110 |
+
|
| 111 |
+
# Ignore certain rules, you can reference them by their id or by their full name
|
| 112 |
+
# Use 'all' to ignore all rules
|
| 113 |
+
# ignore=T1,body-min-length
|
| 114 |
+
|
| 115 |
+
# [ignore-by-body]
|
| 116 |
+
# Ignore certain rules for commits of which the body has a line that matches a regex
|
| 117 |
+
# E.g. Match bodies that have a line that that contain "release"
|
| 118 |
+
# regex=(.*)release(.*)
|
| 119 |
+
#
|
| 120 |
+
# Ignore certain rules, you can reference them by their id or by their full name
|
| 121 |
+
# Use 'all' to ignore all rules
|
| 122 |
+
# ignore=T1,body-min-length
|
| 123 |
+
|
| 124 |
+
# [ignore-body-lines]
|
| 125 |
+
# Ignore certain lines in a commit body that match a regex.
|
| 126 |
+
# E.g. Ignore all lines that start with 'Co-Authored-By'
|
| 127 |
+
# regex=^Co-Authored-By
|
| 128 |
+
|
| 129 |
+
# [ignore-by-author-name]
|
| 130 |
+
# Ignore certain rules for commits of which the author name matches a regex
|
| 131 |
+
# E.g. Match commits made by dependabot
|
| 132 |
+
# regex=(.*)dependabot(.*)
|
| 133 |
+
#
|
| 134 |
+
# Ignore certain rules, you can reference them by their id or by their full name
|
| 135 |
+
# Use 'all' to ignore all rules
|
| 136 |
+
# ignore=T1,body-min-length
|
| 137 |
+
|
| 138 |
+
# This is a contrib rule - a community contributed rule. These are disabled by default.
|
| 139 |
+
# You need to explicitly enable them one-by-one by adding them to the "contrib" option
|
| 140 |
+
# under [general] section above.
|
| 141 |
+
# [contrib-title-conventional-commits]
|
| 142 |
+
# Specify allowed commit types. For details see: https://www.conventionalcommits.org/
|
| 143 |
+
# types = bugfix,user-story,epic
|
.pre-commit-config.yaml
ADDED
|
@@ -0,0 +1,51 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
repos:
|
| 2 |
+
- repo: https://github.com/pre-commit/pre-commit-hooks
|
| 3 |
+
rev: v5.0.0
|
| 4 |
+
hooks:
|
| 5 |
+
- id: check-added-large-files
|
| 6 |
+
- id: check-ast
|
| 7 |
+
- id: check-case-conflict
|
| 8 |
+
- id: check-json
|
| 9 |
+
- id: check-merge-conflict
|
| 10 |
+
- id: check-toml
|
| 11 |
+
- id: end-of-file-fixer
|
| 12 |
+
- repo: https://github.com/astral-sh/ruff-pre-commit
|
| 13 |
+
# Ruff version.
|
| 14 |
+
rev: v0.11.8
|
| 15 |
+
hooks:
|
| 16 |
+
- id: ruff
|
| 17 |
+
- repo: https://github.com/pycqa/isort
|
| 18 |
+
rev: 6.0.1
|
| 19 |
+
hooks:
|
| 20 |
+
- id: isort
|
| 21 |
+
args: ["--profile", "black"]
|
| 22 |
+
- repo: https://github.com/asottile/pyupgrade
|
| 23 |
+
rev: v3.19.1
|
| 24 |
+
hooks:
|
| 25 |
+
- id: pyupgrade
|
| 26 |
+
args: ["--py310-plus"]
|
| 27 |
+
- repo: https://github.com/psf/black
|
| 28 |
+
rev: 25.1.0
|
| 29 |
+
hooks:
|
| 30 |
+
- id: black
|
| 31 |
+
args:
|
| 32 |
+
- --line-length=120
|
| 33 |
+
- repo: https://github.com/jorisroovers/gitlint
|
| 34 |
+
rev: v0.19.1
|
| 35 |
+
hooks:
|
| 36 |
+
- id: gitlint
|
| 37 |
+
name: gitlint
|
| 38 |
+
language: python
|
| 39 |
+
entry: gitlint
|
| 40 |
+
args: [--staged, --msg-filename]
|
| 41 |
+
stages: [commit-msg]
|
| 42 |
+
- repo: https://github.com/python-poetry/poetry
|
| 43 |
+
rev: '1.8.0'
|
| 44 |
+
hooks:
|
| 45 |
+
- id: poetry-check
|
| 46 |
+
- id: poetry-lock
|
| 47 |
+
args: [--no-update]
|
| 48 |
+
language_version: "3.10"
|
| 49 |
+
- id: poetry-export
|
| 50 |
+
name: poetry export for base requirements
|
| 51 |
+
args: [-f, requirements.txt, -o, requirements.txt, -n, --only=main, --without-hashes]
|
DEVELOPMENT.md
ADDED
|
@@ -0,0 +1,176 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# IBM Research Hugging Face Spaces gradio template
|
| 2 |
+
|
| 3 |
+
This template repository lets you quickly build a [gradio](https://www.gradio.app/) Hugging Face spaces demo for the [ibm-granite org](https://huggingface.co/ibm-granite). It is set up with the requirements, theming and analytics for the ibm-granite org as well as pre-commit hooks and linting configuration to maintain a consistent code standard across all demos.
|
| 4 |
+
|
| 5 |
+
## 👩💻 Introduction
|
| 6 |
+
|
| 7 |
+
To deploy demos to the ibm-granite org on Hugging Face, you will be working with the Research Design Technical Experiences (RDTE) team via this GitHub org. You will not gain access to the ibm-granite Hugging Face org as there are limited seats available. Hence, you will work via the RDTE team (who have write access) to create and deploy demos to Hugging Face.
|
| 8 |
+
|
| 9 |
+
## 🛠️ Getting started
|
| 10 |
+
|
| 11 |
+
This is the place to start when building gradio demos for IBM Granite. Complete the following steps to get a repository set up and configured for your demo as well as the deployment pipeline to validate and push it to Hugging Face spaces.
|
| 12 |
+
|
| 13 |
+
1. [Raise an onboarding request](https://github.ibm.com/ibm-huggingface-space-demos/deployment/issues/new?assignees=james-sutton,gwhite&labels=onboarding%2Ctriage&projects=&template=onboarding.yaml&title=%5BOnboarding%5D%3A+). Please fill the templated onboarding request to get a new repository set up for you in this org and to give access to anything else required.
|
| 14 |
+
2. Once your repository has been created, please either update it with your existing demo if you have one, or have a play with the example and modify it to create your new demo. You'll be working in the `main` branch whilst developing your demo. Your `main` branch is linked to the "QA" instance of your demo in the IBM org on Hugging Face.
|
| 15 |
+
3. Make sure that you follow this development guide and use the pre-configured pre-commit hooks before every commit and push.
|
| 16 |
+
4. Once you are happy with your demo and want to get it deployed into production on Hugging Face spaces in the ibm-granite org, open a pull request to merge the `main` branch into the `stable` branch. The RDTE team will validate the demo works well both from a technical and UX standpoint. If your demo needs any custom environment variables or secrets, let the RDTE team know and we will contact you directly to get them added to the Space configuration on Hugging Face.
|
| 17 |
+
5. Once the Pull request has been approved, you can merge it into the `stable` branch. A deployment will then push your changes to Hugging Face spaces where it will build and become available for use. Initially, both the "QA" and "Production" versions of your demo will be marked as private and only visible to members of the ibm-research org (QA) and ibm-granite org (production) that have logged into Hugging Face. The "QA" version will always remain private in the ibm-research org. However, when the RDTE team are happy to publish the demo to stable, they will mark the "Production" version as public in the ibm-granite org.
|
| 18 |
+
|
| 19 |
+
### Onboarding Process Summary
|
| 20 |
+
|
| 21 |
+
The following diagram explains the onboarding process. Actions that you, the developer, take are shown in darker blue. Actions that we, the RDTE team, take are shown in lighter blue. The lighter blue steps that have darker borders are automations maintained by the RDTE team, these steps require no manual intervention.
|
| 22 |
+
|
| 23 |
+
```mermaid
|
| 24 |
+
flowchart TD
|
| 25 |
+
1[Developer opens onboarding ticket in GHE Org]:::developer-->2
|
| 26 |
+
2{RDTE team review the request}:::rdte--Request returned with comments-->1
|
| 27 |
+
2--Approved-->3
|
| 28 |
+
3[Developer is invited to join the ibm-research org on HF]:::rdte-->4
|
| 29 |
+
4[New git repository created from template and configured]:::rdte-->5
|
| 30 |
+
5[QA HF space created in IBM org]:::rdte-->6
|
| 31 |
+
6[Developers push commits to main branch]:::developer-->7
|
| 32 |
+
7-->6
|
| 33 |
+
7[Main branch deployed to QA space in ibm-granite org on HF]:::rdteauto-->8
|
| 34 |
+
8[Developers open/update PR to merge main branch to stable branch]:::developer-->9
|
| 35 |
+
9{PR review}:::rdte--PR Approved-->10
|
| 36 |
+
9--Changes requested-->6
|
| 37 |
+
10{Prod space exists?}:::rdte-- Yes -->12
|
| 38 |
+
10-- No -->11
|
| 39 |
+
11[Prod HF space created in ibm-granite org]:::rdte-->12
|
| 40 |
+
12[Merge PR]:::developer-->13
|
| 41 |
+
13[Stable branch deployed to prod space in ibm-granite org on HF]:::rdteauto
|
| 42 |
+
classDef rdte fill:#EDF5FF,stroke:#D0E2FF,color:#000;
|
| 43 |
+
classDef rdteauto fill:#EDF5FF,stroke:#78A9FF,color:#000;
|
| 44 |
+
classDef developer fill:#A6C8FF,stroke:#78A9FF,color:#000;
|
| 45 |
+
```
|
| 46 |
+
|
| 47 |
+
## 🛠️ Development guide
|
| 48 |
+
|
| 49 |
+
Further information on developing the code in this repository is provided below.
|
| 50 |
+
|
| 51 |
+
### Clone your code repository
|
| 52 |
+
|
| 53 |
+
Once you have been notified that your code repository has been created in this org, you can clone it to your local machine and start work.
|
| 54 |
+
|
| 55 |
+
If you just want to play with our template, you're welcome to [use it](https://github.ibm.com/new?template_name=gradio-template&template_owner=ibm-huggingface-space-demos) to create a new code repository in another org. Later, for deployment, you wil need to move your code to the repository created in this org.
|
| 56 |
+
|
| 57 |
+
### Prerequisites
|
| 58 |
+
|
| 59 |
+
Some things you will need to do on your machine before developing.
|
| 60 |
+
|
| 61 |
+
#### Precommit
|
| 62 |
+
|
| 63 |
+
[Precommit](https://pre-commit.com) is a tool that adds git commit hooks. You will need to [install](https://pre-commit.com/#install) it on your machine and then run within your code repository:
|
| 64 |
+
|
| 65 |
+
```shell
|
| 66 |
+
pre-commit install
|
| 67 |
+
```
|
| 68 |
+
|
| 69 |
+
You can manually run pre-commit using the following command:
|
| 70 |
+
|
| 71 |
+
```shell
|
| 72 |
+
# To run against staged files:
|
| 73 |
+
pre-commit run
|
| 74 |
+
|
| 75 |
+
# If you want to run against staged and unstaged files:
|
| 76 |
+
pre-commit run --all-files
|
| 77 |
+
```
|
| 78 |
+
|
| 79 |
+
It is important to run the pre-commit hooks and fix any files that fail before you commit and push to the repository as the pull request build will fail any PR that does not adhere to them i.e. the RDTE team will only accept your code for deployment to Hugging Face once it has passed all of the pre-commit checks.
|
| 80 |
+
|
| 81 |
+
#### Poetry
|
| 82 |
+
|
| 83 |
+
[Poetry](https://python-poetry.org/) is a tool for Python packaging, dependency and virtual environment management that is used to manage the development of this project. You will need to install Poetry locally. There are several ways to install it including through the package manager of your operating system, however, the easiest way to install is likely using their installer, as follows:
|
| 84 |
+
|
| 85 |
+
```shell
|
| 86 |
+
curl -sSL https://install.python-poetry.org | python3 -
|
| 87 |
+
```
|
| 88 |
+
|
| 89 |
+
You can also use `pip` and `pipx` to install poetry, the details of which are at https://python-poetry.org/docs/
|
| 90 |
+
|
| 91 |
+
Once installed, the project is configured and controlled via the `pyproject.toml` file with the current dependency tree stored in `poetry.lock`. You may also [configure poetry](https://python-poetry.org/docs/configuration/) further if you wish but there is no need to do so as the default options are sufficient. You may, however, wish to change some of the options set in this template:
|
| 92 |
+
| Setting | Notes |
|
| 93 |
+
| ------- | ----- |
|
| 94 |
+
| name | **Update this**, to reflect the name of your demo |
|
| 95 |
+
| version | **Update this**, to reflect the current version of your demo |
|
| 96 |
+
| description | **Update this**, to a short description of your demo |
|
| 97 |
+
| authors | **Update this**, to the list of authors of your demo |
|
| 98 |
+
|
| 99 |
+
## 🛠️ Install and run locally
|
| 100 |
+
|
| 101 |
+
To get set up ready to run the code in development mode:
|
| 102 |
+
|
| 103 |
+
```shell
|
| 104 |
+
# add the poetry shell and export plugins (you only need to do this once on your machine)
|
| 105 |
+
poetry self add poetry-plugin-shell
|
| 106 |
+
poetry self add poetry-plugin-export
|
| 107 |
+
|
| 108 |
+
# create and activate a python virtual environment
|
| 109 |
+
poetry shell
|
| 110 |
+
poetry install
|
| 111 |
+
|
| 112 |
+
# run the demo locally (for development with automatic reload)
|
| 113 |
+
gradio src/app.py
|
| 114 |
+
```
|
| 115 |
+
|
| 116 |
+
## 📝 Documenting your demo
|
| 117 |
+
|
| 118 |
+
If you would like to write some information/documentation about your demo that is intended for developers or other people that might want to run the demo from scratch, please use the [README.md](README.md) file, leaving the Hugging Face Spaces configuration header in place at the top of the file.
|
| 119 |
+
|
| 120 |
+
### Hugging face spaces configuration settings
|
| 121 |
+
|
| 122 |
+
Hugging Face allow the configuration of spaces demonstrations via the [README.md](README.md) file in the root of the project. There is a [Spaces Configuration Reference](https://huggingface.co/docs/hub/en/spaces-config-reference) guide that you can use to gain an understanding of the configuration options that can be specified here.
|
| 123 |
+
|
| 124 |
+
The template has a set of initial defaults, similar to these:
|
| 125 |
+
|
| 126 |
+
```
|
| 127 |
+
---
|
| 128 |
+
title: Granite 3.0 Chat
|
| 129 |
+
colorFrom: blue
|
| 130 |
+
colorTo: indigo
|
| 131 |
+
sdk: gradio
|
| 132 |
+
sdk_version: 5.9.1
|
| 133 |
+
app_file: src/app.py
|
| 134 |
+
pinned: false
|
| 135 |
+
license: apache-2.0
|
| 136 |
+
short_description: Chat with IBM Granite 3.0
|
| 137 |
+
---
|
| 138 |
+
```
|
| 139 |
+
|
| 140 |
+
#### Options
|
| 141 |
+
|
| 142 |
+
The default options specified above:
|
| 143 |
+
| Setting | Notes |
|
| 144 |
+
| ------- | ----- |
|
| 145 |
+
| title | **Update this**, keep this short (recommend max 24 chars), this information is displayed in the centre of the demo description card |
|
| 146 |
+
| emoji | Do not update this, our demos will use a consistent emoji character |
|
| 147 |
+
| colorFrom | Do not update this, used in combination with colorTo to colourize the demo description card |
|
| 148 |
+
| colorTo | see colorFrom |
|
| 149 |
+
| sdk | Do not update this, our Gradio demos will always use the "gradio" setting |
|
| 150 |
+
| sdk_version | Update this if necessary for your demo to function, ideally should be set to the latest gradio version |
|
| 151 |
+
| app_file | Update this if necessary for your demo to function, should be set to the path of the main entry point to the demo |
|
| 152 |
+
| license | Do not update this, our demos are to always be apache-2.0 licensed |
|
| 153 |
+
| short_description | **Update this**, should be set to a few words that describe the demo in a little more detail than the title, this information is displayed in the bottom-right of the demo description card |
|
| 154 |
+
|
| 155 |
+
Other available options:
|
| 156 |
+
| Setting | Notes |
|
| 157 |
+
| ------- | ----- |
|
| 158 |
+
| python_version | You may optionally set this, best advice is to use the default Python version if possible (current default is Python 3.10) |
|
| 159 |
+
| suggested_hardware | Do not use this, unlikely to be required as demos run on ZeroGPU |
|
| 160 |
+
| suggested_storage | Do not use this, our demos do not require storage |
|
| 161 |
+
| app_port | Do not use this, not relevant for gradio demos |
|
| 162 |
+
| base_path | Do not use this, use the app_file setting |
|
| 163 |
+
| fullWidth | Do not use this, our demos will use a consistent default width |
|
| 164 |
+
| header | Do not use this, our demos will use a consistent header |
|
| 165 |
+
| models | Do not use this, let their parsing discover these from our code |
|
| 166 |
+
| datasets | Do not use this, let their parsing discover these from our code |
|
| 167 |
+
| tags | Do not use this, we are not tagging our demos |
|
| 168 |
+
| thumbnail | Do not use this, provides a thumbnail for social sharing of demos |
|
| 169 |
+
| pinned | Do not use this, the RDTE team will change this setting if it's deemed necessary |
|
| 170 |
+
| hf_oauth | Do not use this, we are not using OAuth |
|
| 171 |
+
| hf_oauth_scopes | Do not use this, we are not using OAuth |
|
| 172 |
+
| hf_oauth_expiration_minutes | Do not use this, we are not using OAuth |
|
| 173 |
+
| disable_embedding | Do not use this, leave at the default that allows embedding to take place |
|
| 174 |
+
| startup_duration_timeout | Do not use this, leave at the default 30 minutes |
|
| 175 |
+
| custom_headers | Do not use this, we do not need to add any custom HTTP headers |
|
| 176 |
+
| preload_from_hub | Do not use this, specifying this builds the models and data sets into the container image with the goal of making start up times faster due to not needing to download them each time. However, RDTE testing indicates this setting significantly increases the start up time for our relatively small Granite models |
|
README.md
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
title: Granite Vision Document Intelligence
|
| 3 |
+
short_description: Document intelligence with Granite-Vision-4.1-4B
|
| 4 |
+
colorFrom: blue
|
| 5 |
+
colorTo: indigo
|
| 6 |
+
sdk: gradio
|
| 7 |
+
sdk_version: 5.16.1
|
| 8 |
+
app_file: src/app.py
|
| 9 |
+
pinned: False
|
| 10 |
+
license: apache-2.0
|
| 11 |
+
---
|
poetry.lock
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
pyproject.toml
ADDED
|
@@ -0,0 +1,66 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[tool.poetry]
|
| 2 |
+
name = "granite-vision-document-intelligence"
|
| 3 |
+
version = "0.1.0"
|
| 4 |
+
description = "Document intelligence demo powered by IBM Granite Vision and Docling"
|
| 5 |
+
authors = ["Pengyuan Li <pengyuan@ibm.com>"]
|
| 6 |
+
license = "Apache-2.0"
|
| 7 |
+
readme = "README.md"
|
| 8 |
+
package-mode = false
|
| 9 |
+
|
| 10 |
+
[tool.poetry.dependencies]
|
| 11 |
+
python = ">=3.10,<3.11"
|
| 12 |
+
gradio = "5.9.1"
|
| 13 |
+
torch = "2.4.0"
|
| 14 |
+
spaces = "0.30.4"
|
| 15 |
+
transformers = "^4.47.1"
|
| 16 |
+
accelerate = "^1.2.1"
|
| 17 |
+
PyMuPDF = ">=1.24.0"
|
| 18 |
+
Pillow = ">=10.0.0"
|
| 19 |
+
python-dotenv = "^1.0.0"
|
| 20 |
+
pandas = ">=2.0.0"
|
| 21 |
+
numpy = ">=1.24.0"
|
| 22 |
+
requests = ">=2.31.0"
|
| 23 |
+
tqdm = ">=4.66.0"
|
| 24 |
+
docling = ">=1.18.0"
|
| 25 |
+
docling-core = ">=2.5.0"
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
[tool.poetry.group.dev.dependencies]
|
| 29 |
+
pre-commit = "^4.0.1"
|
| 30 |
+
git-lint = "^0.1.2"
|
| 31 |
+
ruff = "^0.8.3"
|
| 32 |
+
pytest = "^8.3.4"
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
[build-system]
|
| 36 |
+
requires = ["poetry-core"]
|
| 37 |
+
build-backend = "poetry.core.masonry.api"
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
[tool.ruff]
|
| 41 |
+
select = [
|
| 42 |
+
"E", # pycodestyle
|
| 43 |
+
"F", # pyflakes
|
| 44 |
+
"UP", # pyupgrade
|
| 45 |
+
"D", # pydocstyle
|
| 46 |
+
"I", # isort
|
| 47 |
+
"B", # bugbear
|
| 48 |
+
"ANN", # annotations
|
| 49 |
+
"N", # pep8-naming
|
| 50 |
+
"C4", # Comprehensions
|
| 51 |
+
"DTZ", # DatetimeZ
|
| 52 |
+
"Q", # Quotes
|
| 53 |
+
"SIM", # Simplify
|
| 54 |
+
"RUF", # Ruff
|
| 55 |
+
]
|
| 56 |
+
ignore = ["D203", "D213"]
|
| 57 |
+
fixable = ["ALL"]
|
| 58 |
+
unfixable = []
|
| 59 |
+
line-length = 120
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
[tool.black]
|
| 63 |
+
line-length = 120
|
| 64 |
+
|
| 65 |
+
[tool.ruff.lint.pydocstyle]
|
| 66 |
+
convention = "google"
|
requirements.txt
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
gradio==5.16.1
|
| 2 |
+
transformers>=5.6.2
|
| 3 |
+
torch>=2.5.0
|
| 4 |
+
accelerate>=1.2.1,<2.0.0
|
| 5 |
+
peft>=0.7.0
|
| 6 |
+
Pillow>=10.0.0
|
| 7 |
+
PyMuPDF>=1.24.0
|
| 8 |
+
python-dotenv>=1.0.0,<2.0.0
|
| 9 |
+
pandas>=2.0.0
|
| 10 |
+
numpy>=1.24.0
|
| 11 |
+
requests>=2.31.0
|
| 12 |
+
tqdm>=4.66.0
|
| 13 |
+
docling>=1.18.0
|
| 14 |
+
docling-core>=2.5.0
|
src/app.css
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
footer {
|
| 2 |
+
display: none !important;
|
| 3 |
+
}
|
| 4 |
+
|
| 5 |
+
.figure-image .image-container {
|
| 6 |
+
margin-left: 12px;
|
| 7 |
+
margin-right: 12px;
|
| 8 |
+
}
|
src/app.py
ADDED
|
@@ -0,0 +1,550 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Granite Vision Document Intelligence Demo.
|
| 2 |
+
|
| 3 |
+
Upload a PDF or image to explore Granite-Vision-4.1-4B capabilities including
|
| 4 |
+
Chart2CSV, Chart2Code, Chart2Summary, Table Extraction, and Image Q&A.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
# Monkey-patch gradio_client to handle bool JSON Schema values.
# gradio 5.x emits additionalProperties: false/true (valid JSON Schema)
# but gradio_client 1.5.x does not guard against bool in get_type(),
# causing TypeError on every request to the /info endpoint.
try:
    import gradio_client.utils as _gcu

    # Keep references to the originals so the patched wrappers can delegate
    # for every schema shape they do handle correctly.
    _orig_get_type = _gcu.get_type
    _orig_j2p = _gcu._json_schema_to_python_type

    def _patched_get_type(schema):  # noqa: ANN001, ANN202
        # Non-dict schema (e.g. the booleans true/false): report an opaque
        # type instead of crashing inside the original implementation.
        if not isinstance(schema, dict):
            return "unknown"
        return _orig_get_type(schema)

    def _patched_j2p(schema, defs=None):  # noqa: ANN001, ANN202
        # In JSON Schema, `true` permits anything and `false` permits nothing.
        if not isinstance(schema, dict):
            return "any" if schema else "unknown"
        return _orig_j2p(schema, defs)

    _gcu.get_type = _patched_get_type
    _gcu._json_schema_to_python_type = _patched_j2p
except Exception:  # noqa: BLE001
    # gradio_client absent or its private internals renamed: run unpatched.
    pass
|
| 31 |
+
|
| 32 |
+
import os
|
| 33 |
+
from pathlib import Path
|
| 34 |
+
from typing import Any
|
| 35 |
+
|
| 36 |
+
import gradio as gr
|
| 37 |
+
from PIL import Image
|
| 38 |
+
|
| 39 |
+
from crops import extract_figures
|
| 40 |
+
from document_parser import parse_document
|
| 41 |
+
from infer_chart2csv import extract_csv_stream
|
| 42 |
+
from infer_vision_qa import answer_question_stream
|
| 43 |
+
from model_loader import load_processor
|
| 44 |
+
from pdf_io import load_pdf_pages
|
| 45 |
+
from themes.research_monochrome import theme
|
| 46 |
+
from ui_state import create_initial_state, hash_bytes, page_cache, parse_cache
|
| 47 |
+
|
| 48 |
+
# Pre-load the processor at startup (CPU-only, no GPU needed).
# This avoids paying the processor load cost on the first user request.
load_processor()
|
| 51 |
+
|
| 52 |
+
# UI header strings rendered at the top of the Blocks layout.
TITLE = "Granite Vision: Document Intelligence"
DESCRIPTION = (
    "Upload a PDF, Word, Excel, PowerPoint, or image to explore Granite-Vision-4.1-4B's document intelligence capabilities — "
    "including Chart2Summary, Chart2CSV, Chart2Code, Table Extraction, and Image Description — "
    "with automatic Docling-powered parsing for documents and direct inference on uploaded images."
)
|
| 58 |
+
|
| 59 |
+
IMAGE_EXTENSIONS = {".jpg", ".jpeg", ".jfif", ".png", ".bmp", ".dib", ".gif", ".tif", ".tiff", ".webp"}
|
| 60 |
+
OFFICE_EXTENSIONS = {".docx", ".xlsx", ".pptx"}
|
| 61 |
+
|
| 62 |
+
css_file_path = Path(Path(__file__).parent / "app.css")
|
| 63 |
+
head_file_path = Path(Path(__file__).parent / "app_head.html")
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
def _is_image_file(file_path: str) -> bool:
|
| 67 |
+
"""Check whether a file path points to a supported image format."""
|
| 68 |
+
ext = os.path.splitext(file_path)[1].lower()
|
| 69 |
+
return ext in IMAGE_EXTENSIONS
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
def _is_office_file(file_path: str) -> bool:
|
| 73 |
+
"""Check whether a file path points to a supported Office format (DOCX/XLSX/PPTX)."""
|
| 74 |
+
ext = os.path.splitext(file_path)[1].lower()
|
| 75 |
+
return ext in OFFICE_EXTENSIONS
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
def process_upload(file_path: str, session_state: dict[str, Any]) -> tuple:
    """Parse an uploaded PDF or load an image and extract figures.

    Images skip Docling entirely; PDFs are rendered to page images (capped at
    ``max_pages``) and then parsed; Office files are parsed without page rendering.

    Args:
        file_path: Path to the uploaded file.
        session_state: Current Gradio session state dictionary.

    Returns:
        Tuple of (status, html_content, fig_status, fig_caption, fig_image, session_state).
    """
    max_pages = 20

    # Reset per-upload selection and conversation state before anything else.
    session_state["current_figure_index"] = 0
    session_state["conversation_history"] = []
    session_state["current_image_path"] = None

    if not file_path:
        return "Please upload a PDF, Office document, or image.", "No document loaded", "No figures", "", None, session_state

    try:
        with open(file_path, "rb") as f:
            file_bytes = f.read()

        # Content hash keys the module-level page/parse caches from ui_state.
        file_hash = hash_bytes(file_bytes)
        session_state["uploaded_file_hash"] = file_hash
        session_state["uploaded_file_bytes"] = file_bytes

        if _is_image_file(file_path):
            # Direct image upload: treat the whole image as the single "figure".
            image = Image.open(file_path).convert("RGB")
            figures_info = [{"image": image, "page": 0, "bbox": None, "caption": ""}]

            session_state["page_images"] = [image]
            session_state["parsed_result"] = {}
            session_state["figures_info"] = figures_info
            session_state["selected_figure"] = figures_info[0]

            return (
                "Image loaded successfully.\nNumber of figures: 1.",
                "Image uploaded directly (no document parsing needed)",
                "Figure 1 of 1 (Page 1)",
                "",
                image,
                session_state,
            )

        file_ext = os.path.splitext(file_path)[1].lower()
        is_office = _is_office_file(file_path)
        fmt_label = file_ext.lstrip(".").upper()
        status_lines = [f"{fmt_label} loaded successfully."]

        if is_office:
            # Office formats have no page rendering; figures come from Docling only.
            page_images = []
            session_state["page_images"] = []
        else:
            # PDF: render pages once per (content hash, max_pages) and cache.
            cache_key = f"{file_hash}_{max_pages}"
            if cache_key in page_cache:
                page_images = page_cache[cache_key]
            else:
                page_images = load_pdf_pages(file_bytes, max_pages=max_pages)
                page_cache[cache_key] = page_images
            session_state["page_images"] = page_images
            status_lines.append(f"Number of pages rendered: {len(page_images)} (max {max_pages}).")

        # Docling parse is cached purely by content hash (independent of max_pages).
        if file_hash in parse_cache:
            parse_result = parse_cache[file_hash]
        else:
            parse_result = parse_document(file_bytes, file_ext=file_ext)
            parse_cache[file_hash] = parse_result
        session_state["parsed_result"] = parse_result
        status_lines.append("Document parsing done using Docling.")

        figures_info = extract_figures(page_images, parse_result.get("figures", []))
        session_state["figures_info"] = figures_info
        status_lines.append(f"Number of figures extracted: {len(figures_info)}.")

        if figures_info:
            # Pre-select the first figure so every tab has something to show.
            session_state["selected_figure"] = figures_info[0]
            fig_status = f"Figure 1 of {len(figures_info)} (Page {figures_info[0]['page'] + 1})"
            fig_caption = figures_info[0].get("caption", "No caption")
            fig_image = figures_info[0]["image"]
        else:
            session_state["selected_figure"] = None
            fig_status = "No figures found"
            fig_caption = ""
            fig_image = None

        html_content = parse_result.get("html", "No content available")
        status = "\n".join(status_lines)

        return status, html_content, fig_status, fig_caption, fig_image, session_state

    except Exception as e:  # noqa: BLE001
        # Broad catch so a malformed upload degrades to an error message in the UI.
        import traceback

        print(f"Error: {e}")
        traceback.print_exc()
        return f"Error: {e!s}", f"Error loading document: {e!s}", "Error", "", None, session_state
|
| 175 |
+
|
| 176 |
+
|
| 177 |
+
def _get_figure_display(session_state: dict[str, Any]) -> tuple[str, str, Image.Image | None]:
    """Build the display triple (status line, caption, image) for the active figure.

    Args:
        session_state: Current session state dictionary.

    Returns:
        Tuple of (fig_status, fig_caption, fig_image).
    """
    figures = session_state.get("figures_info", [])
    if not figures:
        return "No figures found", "", None

    position = session_state.get("current_figure_index", 0)
    current = figures[position]
    status_line = f"Figure {position + 1} of {len(figures)} (Page {current['page'] + 1})"
    return status_line, current.get("caption", "No caption"), current["image"]
|
| 196 |
+
|
| 197 |
+
|
| 198 |
+
def next_figure(session_state: dict[str, Any]) -> tuple:
    """Advance to the next figure, wrapping around past the last one.

    Args:
        session_state: Current session state dictionary.

    Returns:
        Tuple of (fig_status, fig_caption, fig_image, session_state).
    """
    figures = session_state.get("figures_info", [])
    if not figures:
        return "No figures found", "", None, session_state

    new_index = (session_state.get("current_figure_index", 0) + 1) % len(figures)
    # Each figure change starts a fresh conversation for the Q&A tabs.
    session_state.update(
        current_figure_index=new_index,
        selected_figure=figures[new_index],
        conversation_history=[],
        current_image_path=None,
    )
    return (*_get_figure_display(session_state), session_state)
|
| 220 |
+
|
| 221 |
+
|
| 222 |
+
def prev_figure(session_state: dict[str, Any]) -> tuple:
    """Step back to the previous figure, wrapping around before the first one.

    Args:
        session_state: Current session state dictionary.

    Returns:
        Tuple of (fig_status, fig_caption, fig_image, session_state).
    """
    figures = session_state.get("figures_info", [])
    if not figures:
        return "No figures found", "", None, session_state

    new_index = (session_state.get("current_figure_index", 0) - 1) % len(figures)
    # Each figure change starts a fresh conversation for the Q&A tabs.
    session_state.update(
        current_figure_index=new_index,
        selected_figure=figures[new_index],
        conversation_history=[],
        current_image_path=None,
    )
    return (*_get_figure_display(session_state), session_state)
|
| 244 |
+
|
| 245 |
+
|
| 246 |
+
def describe_image_helper(session_state: dict[str, Any]):  # noqa: ANN201
    """Stream a detailed description of the currently selected figure."""
    fig = session_state.get("selected_figure")
    if fig is None:
        yield "No figure selected", session_state
        return
    try:
        stream = answer_question_stream(fig["image"], "Describe this image in detail", [], None)
        for chunk in stream:
            yield chunk, session_state
    except Exception as e:  # noqa: BLE001
        yield f"Error: {e!s}", session_state
|
| 258 |
+
|
| 259 |
+
|
| 260 |
+
def load_current_figure(session_state: dict[str, Any]) -> tuple:
    """Refresh the figure display for a newly selected tab.

    Conversation history is wiped so every tab starts from a clean slate.
    """
    session_state["conversation_history"] = []
    session_state["current_image_path"] = None
    return (*_get_figure_display(session_state), session_state)
|
| 269 |
+
|
| 270 |
+
|
| 271 |
+
# Natural-language prompt used for Chart2Code generation.
PROMPT_TEXT_CODE = (
    "Please take a look at this chart image and generate Python code that perfectly reconstructs this chart image."
)

# Special task tokens recognized by the model for chart summary / table extraction.
PROMPT_TEXT_SUMMARY = "<chart2summary>"

PROMPT_TEXT_TABLE = "<tables_html>"
|
| 278 |
+
|
| 279 |
+
|
| 280 |
+
def extract_code_helper(session_state: dict[str, Any]):  # noqa: ANN201
    """Stream Python code that reconstructs the currently selected chart."""
    fig = session_state.get("selected_figure")
    if fig is None:
        yield "No figure selected", session_state
        return
    try:
        stream = answer_question_stream(fig["image"], PROMPT_TEXT_CODE, [], None)
        for chunk in stream:
            yield chunk, session_state
    except Exception as e:  # noqa: BLE001
        yield f"Error: {e!s}", session_state
|
| 292 |
+
|
| 293 |
+
|
| 294 |
+
def extract_summary_helper(session_state: dict[str, Any]):  # noqa: ANN201
    """Stream a text summary of the currently selected chart."""
    fig = session_state.get("selected_figure")
    if fig is None:
        yield "No figure selected", session_state
        return
    try:
        stream = answer_question_stream(fig["image"], PROMPT_TEXT_SUMMARY, [], None)
        for chunk in stream:
            yield chunk, session_state
    except Exception as e:  # noqa: BLE001
        yield f"Error: {e!s}", session_state
|
| 306 |
+
|
| 307 |
+
|
| 308 |
+
def extract_table_helper(session_state: dict[str, Any]):  # noqa: ANN201
    """Stream table extraction (as HTML) for the currently selected figure."""
    import re

    fig = session_state.get("selected_figure")
    if fig is None:
        yield "No figure selected", session_state
        return
    try:
        accumulated = ""
        for chunk in answer_question_stream(fig["image"], PROMPT_TEXT_TABLE, [], None):
            accumulated = chunk
            yield accumulated, session_state
        # Final pass: strip markdown fences / brackets the model may wrap around HTML
        cleaned = accumulated
        for pattern in (r"^```(?:html)?\s*", r"\s*```$", r"^\[\s*", r"\s*\]$"):
            cleaned = re.sub(pattern, "", cleaned.strip())
        if cleaned != accumulated:
            yield cleaned, session_state
    except Exception as e:  # noqa: BLE001
        yield f"Error: {e!s}", session_state
|
| 330 |
+
|
| 331 |
+
|
| 332 |
+
def extract_csv_helper(session_state: dict[str, Any]):  # noqa: ANN201
    """Stream CSV extraction for the currently selected chart."""
    fig = session_state.get("selected_figure")
    if fig is None:
        yield "No figure selected", session_state
        return
    try:
        for chunk in extract_csv_stream(fig["image"]):
            yield chunk, session_state
    except Exception as e:  # noqa: BLE001
        yield f"Error: {e!s}", session_state
|
| 344 |
+
|
| 345 |
+
|
| 346 |
+
def _make_nav(nav_fn: Any) -> Any:
|
| 347 |
+
"""Wrap a nav function to also clear the result panel when navigating figures."""
|
| 348 |
+
def _wrapper(session_state: dict[str, Any]) -> tuple:
|
| 349 |
+
fig_status, fig_caption, fig_image, state = nav_fn(session_state)
|
| 350 |
+
return fig_status, fig_caption, fig_image, "", state
|
| 351 |
+
return _wrapper
|
| 352 |
+
|
| 353 |
+
|
| 354 |
+
# Top-level Gradio UI: six tabs sharing one per-session state dict.
with gr.Blocks(
    title=TITLE,
    theme=theme,
    css_paths=css_file_path,
    head_paths=head_file_path,
    fill_height=True,
) as demo:
    gr.Markdown(f"# {TITLE}")
    gr.Markdown(DESCRIPTION)

    # gr.State gives each browser session its own copy of the state dict.
    session_state = gr.State(create_initial_state())

    # Per-tab nav wrappers: clear result output when switching figures
    _sum_prev = _make_nav(prev_figure)
    _sum_next = _make_nav(next_figure)
    _csv_prev = _make_nav(prev_figure)
    _csv_next = _make_nav(next_figure)
    _code_prev = _make_nav(prev_figure)
    _code_next = _make_nav(next_figure)
    _tbl_prev = _make_nav(prev_figure)
    _tbl_next = _make_nav(next_figure)
    _qa_prev = _make_nav(prev_figure)
    _qa_next = _make_nav(next_figure)

    with gr.Tabs():
        # TAB 1: UPLOAD & PARSE
        with gr.Tab("Parse & Extract"):
            file_path = gr.File(
                label="Upload PDF, Office Document, or Image",
                file_types=[".pdf", ".docx", ".xlsx", ".pptx", ".jpg", ".jpeg", ".jfif", ".png", ".bmp", ".dib", ".gif", ".tif", ".tiff", ".webp"],
            )

            status = gr.Textbox(label="Status", interactive=False, lines=2)

            with gr.Row():
                with gr.Column(scale=1):
                    html_view = gr.Textbox(
                        label="Parsed Document (Docling)",
                        value="Upload a document to see parsed content",
                        lines=35,
                        interactive=False,
                    )

                with gr.Column(scale=1):
                    gr.Markdown("### Extracted Figures")
                    fig_info = gr.Textbox(label="Figure Info", interactive=False)
                    fig_caption = gr.Textbox(label="Caption", interactive=False)
                    fig_image = gr.Image(label="Figure", type="pil", elem_classes=["figure-image"])

                    with gr.Row():
                        prev_btn = gr.Button("Previous", scale=1)
                        next_btn = gr.Button("Next", scale=1)

            # Upload triggers the full parse/extract pipeline.
            file_path.upload(
                process_upload,
                inputs=[file_path, session_state],
                outputs=[status, html_view, fig_info, fig_caption, fig_image, session_state],
            )
            next_btn.click(
                next_figure,
                inputs=[session_state],
                outputs=[fig_info, fig_caption, fig_image, session_state],
            )
            prev_btn.click(
                prev_figure,
                inputs=[session_state],
                outputs=[fig_info, fig_caption, fig_image, session_state],
            )

        # TAB 2: CHART2SUMMARY
        with gr.Tab("Chart2Summary") as summary_tab:
            gr.Markdown("Generate a text summary of the selected chart")

            with gr.Row():
                with gr.Column(scale=1):
                    gr.Markdown("### Figure")
                    summary_fig_info = gr.Textbox(label="Figure Info", interactive=False)
                    summary_fig_caption = gr.Textbox(label="Caption", interactive=False)
                    summary_fig_image = gr.Image(label="Figure", type="pil", elem_classes=["figure-image"])

                    with gr.Row():
                        summary_prev_btn = gr.Button("Previous", scale=1)
                        summary_next_btn = gr.Button("Next", scale=1)

                with gr.Column(scale=1):
                    gr.Markdown("### Summary")
                    summary_btn = gr.Button("Generate Summary", variant="primary")
                    summary_out = gr.Textbox(label="Chart Summary", lines=20, interactive=False)

            # Wrapped nav fns also clear summary_out when the figure changes.
            summary_prev_btn.click(_sum_prev, inputs=[session_state], outputs=[summary_fig_info, summary_fig_caption, summary_fig_image, summary_out, session_state])
            summary_next_btn.click(_sum_next, inputs=[session_state], outputs=[summary_fig_info, summary_fig_caption, summary_fig_image, summary_out, session_state])
            summary_btn.click(extract_summary_helper, inputs=[session_state], outputs=[summary_out, session_state])
            summary_tab.select(load_current_figure, inputs=[session_state], outputs=[summary_fig_info, summary_fig_caption, summary_fig_image, session_state])

        # TAB 3: CHART2CSV
        with gr.Tab("Chart2CSV") as csv_tab:
            gr.Markdown("Extract CSV data from the selected chart")

            with gr.Row():
                with gr.Column(scale=1):
                    gr.Markdown("### Figure")
                    csv_fig_info = gr.Textbox(label="Figure Info", interactive=False)
                    csv_fig_caption = gr.Textbox(label="Caption", interactive=False)
                    csv_fig_image = gr.Image(label="Figure", type="pil", elem_classes=["figure-image"])

                    with gr.Row():
                        csv_prev_btn = gr.Button("Previous", scale=1)
                        csv_next_btn = gr.Button("Next", scale=1)

                with gr.Column(scale=1):
                    gr.Markdown("### CSV Extraction")
                    extract_btn = gr.Button("Extract CSV", variant="primary")
                    csv_out = gr.Textbox(label="CSV", lines=20, interactive=False)

            csv_prev_btn.click(_csv_prev, inputs=[session_state], outputs=[csv_fig_info, csv_fig_caption, csv_fig_image, csv_out, session_state])
            csv_next_btn.click(_csv_next, inputs=[session_state], outputs=[csv_fig_info, csv_fig_caption, csv_fig_image, csv_out, session_state])
            extract_btn.click(extract_csv_helper, inputs=[session_state], outputs=[csv_out, session_state])
            csv_tab.select(load_current_figure, inputs=[session_state], outputs=[csv_fig_info, csv_fig_caption, csv_fig_image, session_state])

        # TAB 4: CHART2CODE
        with gr.Tab("Chart2Code") as code_tab:
            gr.Markdown("Generate Python code to reconstruct the selected chart")

            with gr.Row():
                with gr.Column(scale=1):
                    gr.Markdown("### Figure")
                    code_fig_info = gr.Textbox(label="Figure Info", interactive=False)
                    code_fig_caption = gr.Textbox(label="Caption", interactive=False)
                    code_fig_image = gr.Image(label="Figure", type="pil", elem_classes=["figure-image"])

                    with gr.Row():
                        code_prev_btn = gr.Button("Previous", scale=1)
                        code_next_btn = gr.Button("Next", scale=1)

                with gr.Column(scale=1):
                    gr.Markdown("### Generated Code")
                    code_btn = gr.Button("Generate Code", variant="primary")
                    code_out = gr.Textbox(label="Python Code", lines=20, interactive=False)

            code_prev_btn.click(_code_prev, inputs=[session_state], outputs=[code_fig_info, code_fig_caption, code_fig_image, code_out, session_state])
            code_next_btn.click(_code_next, inputs=[session_state], outputs=[code_fig_info, code_fig_caption, code_fig_image, code_out, session_state])
            code_btn.click(extract_code_helper, inputs=[session_state], outputs=[code_out, session_state])
            code_tab.select(load_current_figure, inputs=[session_state], outputs=[code_fig_info, code_fig_caption, code_fig_image, session_state])

        # TAB 5: TABLE EXTRACTION
        with gr.Tab("Table Extraction") as table_tab:
            gr.Markdown("Extract table data as HTML from the selected figure")

            with gr.Row():
                with gr.Column(scale=1):
                    gr.Markdown("### Figure")
                    table_fig_info = gr.Textbox(label="Figure Info", interactive=False)
                    table_fig_caption = gr.Textbox(label="Caption", interactive=False)
                    table_fig_image = gr.Image(label="Figure", type="pil", elem_classes=["figure-image"])

                    with gr.Row():
                        table_prev_btn = gr.Button("Previous", scale=1)
                        table_next_btn = gr.Button("Next", scale=1)

                with gr.Column(scale=1):
                    gr.Markdown("### Table Extraction")
                    table_btn = gr.Button("Extract Table", variant="primary")
                    # gr.HTML renders the model's <table> markup directly.
                    table_out = gr.HTML(value="<p>Upload a document and click Extract Table to see results here</p>")

            table_prev_btn.click(_tbl_prev, inputs=[session_state], outputs=[table_fig_info, table_fig_caption, table_fig_image, table_out, session_state])
            table_next_btn.click(_tbl_next, inputs=[session_state], outputs=[table_fig_info, table_fig_caption, table_fig_image, table_out, session_state])
            table_btn.click(extract_table_helper, inputs=[session_state], outputs=[table_out, session_state])
            table_tab.select(load_current_figure, inputs=[session_state], outputs=[table_fig_info, table_fig_caption, table_fig_image, session_state])

        # TAB 6: IMAGE DESCRIPTION
        with gr.Tab("Image Description") as qa_tab:
            gr.Markdown("Get a detailed description of the selected figure")

            with gr.Row():
                with gr.Column(scale=1):
                    gr.Markdown("### Figure")
                    qa_fig_info = gr.Textbox(label="Figure Info", interactive=False)
                    qa_fig_caption = gr.Textbox(label="Caption", interactive=False)
                    qa_fig_image = gr.Image(label="Figure", type="pil", elem_classes=["figure-image"])

                    with gr.Row():
                        qa_prev_btn = gr.Button("Previous", scale=1)
                        qa_next_btn = gr.Button("Next", scale=1)

                with gr.Column(scale=1):
                    gr.Markdown("### Description")
                    describe_btn = gr.Button("Describe Image", variant="primary")
                    answer = gr.Textbox(label="Description", lines=20, interactive=False)

            qa_prev_btn.click(_qa_prev, inputs=[session_state], outputs=[qa_fig_info, qa_fig_caption, qa_fig_image, answer, session_state])
            qa_next_btn.click(_qa_next, inputs=[session_state], outputs=[qa_fig_info, qa_fig_caption, qa_fig_image, answer, session_state])
            describe_btn.click(describe_image_helper, inputs=[session_state], outputs=[answer, session_state])
            qa_tab.select(load_current_figure, inputs=[session_state], outputs=[qa_fig_info, qa_fig_caption, qa_fig_image, session_state])
|
| 547 |
+
|
| 548 |
+
|
| 549 |
+
if __name__ == "__main__":
    # NOTE(review): SSR is disabled here — presumably to sidestep Gradio 5
    # server-side-rendering issues on Spaces; confirm before changing.
    demo.launch(ssr_mode=False)
|
src/app_head.html
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<!-- Google Analytics (gtag.js) for measurement ID G-C6LFT227RC.
     Loaded async so it never blocks page rendering. -->
<script
  async
  src="https://www.googletagmanager.com/gtag/js?id=G-C6LFT227RC"
></script>
<script>
  // Standard GA bootstrap: commands are queued on dataLayer until the
  // async gtag.js script above finishes loading, then replayed.
  window.dataLayer = window.dataLayer || [];
  function gtag() {
    dataLayer.push(arguments);
  }
  gtag("js", new Date());
  gtag("config", "G-C6LFT227RC");
</script>
|
src/crops.py
ADDED
|
@@ -0,0 +1,71 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Figure extraction and processing using PIL images from Docling."""
|
| 2 |
+
|
| 3 |
+
from typing import Any
|
| 4 |
+
|
| 5 |
+
from PIL import Image
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
def extract_figures(
    page_images: list[Image.Image],
    figures_info: list[dict[str, Any]],
) -> list[dict[str, Any]]:
    """Normalize Docling figure metadata into a uniform list of figure records.

    Each valid entry receives a 200x200 LANCZOS thumbnail. When no usable
    figures are found, up to three page images are returned as previews.

    Args:
        page_images: Rendered page images, used only as a fallback.
        figures_info: Figure metadata dicts from Docling, each carrying
            ``image``, ``bbox``, ``page``, and ``caption`` fields.

    Returns:
        List of dicts with ``image`` (PIL), ``thumbnail``, ``bbox``, ``page``,
        and ``caption``.
    """
    processed: list[dict[str, Any]] = []

    try:
        for meta in figures_info:
            candidate = meta.get("image")

            # Skip entries with no image payload at all.
            if candidate is None:
                print(f"Figure missing image: {meta}")
                continue

            # Skip entries whose payload is not an actual PIL image.
            if not isinstance(candidate, Image.Image):
                print(f"Figure image is not PIL Image: {type(candidate)}")
                continue

            # Thumbnail a copy so the full-size figure stays untouched.
            preview = candidate.copy()
            preview.thumbnail((200, 200), Image.Resampling.LANCZOS)

            processed.append({
                "image": candidate,
                "thumbnail": preview,
                "bbox": meta.get("bbox"),
                "page": meta.get("page", 0),
                "caption": meta.get("caption", ""),
            })

            print(f"Processed figure {len(processed)}")

    except Exception as e:  # noqa: BLE001
        import traceback

        print(f"Error processing figures: {e}")
        traceback.print_exc()

    # Fallback: no figures survived validation — show page previews instead.
    if not processed and page_images:
        print("No figures extracted, using page previews as fallback")
        for idx, fallback_img in enumerate(page_images[:3]):
            preview = fallback_img.copy()
            preview.thumbnail((200, 200), Image.Resampling.LANCZOS)
            processed.append({
                "image": fallback_img,
                "thumbnail": preview,
                "bbox": None,
                "page": idx,
                "caption": f"Page {idx + 1}",
            })

    return processed
|
src/document_parser.py
ADDED
|
@@ -0,0 +1,146 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Docling document parsing with figure extraction and markdown export."""
|
| 2 |
+
|
| 3 |
+
import os
|
| 4 |
+
import tempfile
|
| 5 |
+
from collections.abc import Callable
|
| 6 |
+
from typing import Any
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
_EXT_TO_INPUT_FORMAT = {
|
| 10 |
+
".pdf": "PDF",
|
| 11 |
+
".docx": "DOCX",
|
| 12 |
+
".xlsx": "XLSX",
|
| 13 |
+
".pptx": "PPTX",
|
| 14 |
+
}
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
def parse_document(
    file_bytes: bytes,
    file_ext: str = ".pdf",
    on_progress: Callable[[str], None] | None = None,
) -> dict[str, Any]:
    """Parse a document with Docling and extract markdown, text, and figure regions.

    Degrades gracefully: if Docling is not installed a placeholder document is
    returned, and any other failure produces an error document instead of raising.

    Args:
        file_bytes: Document file content as bytes.
        file_ext: File extension (e.g. ``".pdf"``, ``".docx"``, ``".xlsx"``, ``".pptx"``).
        on_progress: Optional callback ``(phase_message) -> None`` for progress reporting.

    Returns:
        Dictionary with keys:
        - ``html``: HTML-wrapped markdown representation of the document.
        - ``text``: Full extracted plain text.
        - ``figures``: List of figure dicts with ``bbox``, ``page``, ``caption``, and ``image``.
    """
    def _report(msg: str) -> None:
        # Forward phase messages to the caller's callback, if one was given.
        if on_progress:
            on_progress(msg)

    try:
        from docling.datamodel.base_models import InputFormat
        from docling.document_converter import DocumentConverter, PdfFormatOption

        ext = file_ext.lower()
        # Docling converts from a file path, so spill the bytes to a temp file.
        # delete=False because the converter reads the file after this block closes it.
        with tempfile.NamedTemporaryFile(suffix=ext, delete=False) as tmp:
            tmp.write(file_bytes)
            tmp_path = tmp.name

        try:
            # Unknown extensions fall back to PDF handling.
            format_name = _EXT_TO_INPUT_FORMAT.get(ext, "PDF")
            input_format = getattr(InputFormat, format_name)

            _report("Initializing document converter...")

            if input_format == InputFormat.PDF:
                pdf_format_option = PdfFormatOption()
                # Ask Docling to render picture crops (at 2x scale) so figures
                # can be displayed and queried downstream.
                pdf_format_option.pipeline_options.generate_picture_images = True
                pdf_format_option.pipeline_options.images_scale = 2.0

                # Force CPU to avoid CUDA init outside @spaces.GPU on ZeroGPU spaces
                try:
                    from docling.datamodel.pipeline_options import AcceleratorDevice, AcceleratorOptions
                    pdf_format_option.pipeline_options.accelerator_options = AcceleratorOptions(
                        device=AcceleratorDevice.CPU
                    )
                except Exception:  # noqa: BLE001
                    pass  # Older docling versions without AcceleratorOptions

                converter = DocumentConverter(format_options={InputFormat.PDF: pdf_format_option})
            else:
                converter = DocumentConverter()

            _report("Converting document (this may take a moment)...")
            result = converter.convert(tmp_path)
            doc = result.document

            _report("Exporting document content...")
            markdown_text = doc.export_to_markdown()
            html = markdown_text  # NOTE(review): "html" actually holds markdown — presumably the renderer accepts it; confirm.
            text = doc.export_to_text()

            _report("Processing figures...")
            figures: list[dict[str, Any]] = []
            try:
                if hasattr(doc, "pictures"):
                    for figure in doc.pictures:
                        # Skip pictures outside the main body layer (headers, furniture).
                        if figure.content_layer.value != "body":
                            continue

                        page_num = 0
                        bbox_list = None

                        if figure.prov:
                            page_num = figure.prov[0].page_no - 1  # Docling is 1-based
                            bbox = figure.prov[0].bbox
                            # NOTE(review): stored as [l, t, width, height] from Docling's
                            # bbox fields — confirm the coordinate origin before reuse.
                            bbox_list = [bbox.l, bbox.t, bbox.width, bbox.height]

                        # Resolve the caption by following the "#/texts/<idx>" cross-reference.
                        caption = ""
                        if figure.captions:
                            for cap_ref in figure.captions:
                                try:
                                    if hasattr(cap_ref, "cref") and cap_ref.cref.startswith("#/texts/"):
                                        idx = int(cap_ref.cref.split("/")[-1])
                                        if idx < len(doc.texts):
                                            caption = doc.texts[idx].text
                                            break
                                except Exception:  # noqa: BLE001
                                    pass

                        if figure.image:
                            try:
                                pil_image = figure.image.pil_image
                                figures.append({
                                    "bbox": bbox_list,
                                    "page": page_num,
                                    "caption": caption,
                                    "image": pil_image,
                                })
                            except Exception:  # noqa: BLE001
                                pass

            except Exception:  # noqa: BLE001
                # Figure extraction is best-effort; the document text is still returned.
                figures = []

            return {"html": html, "text": text, "figures": figures}

        finally:
            # Always remove the temp file, even when conversion fails.
            if os.path.exists(tmp_path):
                os.unlink(tmp_path)

    except ImportError as e:
        print(f"Docling import error: {e}, using placeholder")
        return {
            "html": "<h1>Sample Document</h1><p>Docling not available - using placeholder.</p>",
            "text": "Sample text from PDF.\n\nDocling not available - using placeholder.",
            "figures": [],
        }
    except Exception as e:  # noqa: BLE001
        import traceback

        print(f"Docling parse error: {e}")
        traceback.print_exc()
        return {
            "html": f"<h1>Error</h1><pre>{e!s}</pre>",
            "text": f"Error: {e!s}",
            "figures": [],
        }
|
src/infer_chart2csv.py
ADDED
|
@@ -0,0 +1,106 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Chart-to-CSV extraction using Granite Vision.
|
| 2 |
+
|
| 3 |
+
Converts chart images to tabular CSV data using ibm-granite/granite-vision-4.1-4b.
|
| 4 |
+
|
| 5 |
+
Same ZeroGPU streaming pattern as infer_vision_qa.py — see that module for details.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
import threading
|
| 9 |
+
from collections.abc import Generator
|
| 10 |
+
|
| 11 |
+
import spaces
|
| 12 |
+
import torch
|
| 13 |
+
from PIL import Image
|
| 14 |
+
from transformers import TextIteratorStreamer
|
| 15 |
+
|
| 16 |
+
from model_loader import load_model, load_processor
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
@spaces.GPU(duration=120)
def extract_csv_stream(image: Image.Image) -> Generator[str, None, None]:
    """Stream CSV extraction from a chart image, yielding growing partial text.

    Executes entirely inside @spaces.GPU; model.generate() runs on a plain
    background thread while this generator consumes the streamer.

    Args:
        image: PIL Image of a chart or table.

    Yields:
        The CSV text accumulated so far, updated after every new token.
    """
    processor, model = load_model()

    if processor is None or model is None:
        # Model unavailable: emit a fixed placeholder table and stop.
        yield "col1,col2,col3\nvalue1,value2,value3\nvalue4,value5,value6"
        return

    try:
        image = image.convert("RGB")
        messages = [{"role": "user", "content": [
            {"type": "image"},
            {"type": "text", "text": "<chart2csv>"},
        ]}]
        prompt = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
        model_inputs = processor(text=prompt, images=image, return_tensors="pt").to(model.device)

        streamer = TextIteratorStreamer(processor.tokenizer, skip_prompt=True, skip_special_tokens=True)

        def _run_generation() -> None:
            # GPU access is process-wide inside the ZeroGPU subprocess,
            # so a plain thread is sufficient here.
            with torch.inference_mode():
                model.generate(**model_inputs, max_new_tokens=4096, use_cache=True, streamer=streamer)

        worker = threading.Thread(target=_run_generation)
        worker.start()

        partial = ""
        for piece in streamer:
            if piece:
                partial += piece
                yield partial

        worker.join()

    except Exception as e:  # noqa: BLE001
        import traceback
        traceback.print_exc()
        yield f"Error: {e!s}"
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
@spaces.GPU(duration=120)
def extract_csv(image: Image.Image) -> str:
    """Extract tabular data from a chart image as CSV text (non-streaming).

    Args:
        image: PIL Image of a chart or table.

    Returns:
        The CSV text decoded from the model output, with a truncation notice
        appended when the generation budget was exhausted.
    """
    processor, model = load_model()

    if processor is None or model is None:
        # Model unavailable: return a fixed placeholder table.
        return "col1,col2,col3\nvalue1,value2,value3\nvalue4,value5,value6"

    try:
        image = image.convert("RGB")
        messages = [{"role": "user", "content": [
            {"type": "image"},
            {"type": "text", "text": "<chart2csv>"},
        ]}]
        prompt = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
        model_inputs = processor(text=prompt, images=image, return_tensors="pt").to(model.device)

        token_budget = 4096
        with torch.inference_mode():
            output_ids = model.generate(**model_inputs, max_new_tokens=token_budget, use_cache=True)

        # Slice off the prompt tokens; only decode what the model produced.
        new_ids = output_ids[0, model_inputs["input_ids"].shape[1]:]
        csv_text = processor.decode(new_ids, skip_special_tokens=True)
        if len(new_ids) >= token_budget:
            csv_text += "\n\n[Max token limit reached — response may be truncated]"
        return csv_text

    except Exception as e:  # noqa: BLE001
        import traceback
        traceback.print_exc()
        return f"Error: {e!s}"
|
src/infer_vision_qa.py
ADDED
|
@@ -0,0 +1,155 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Granite Vision image and chart Q&A.
|
| 2 |
+
|
| 3 |
+
Supports single-turn and multi-turn conversations using ibm-granite/granite-vision-4.1-4b.
|
| 4 |
+
|
| 5 |
+
ZeroGPU streaming pattern:
|
| 6 |
+
- answer_question_stream is decorated with @spaces.GPU so the entire generator
|
| 7 |
+
runs inside the ZeroGPU subprocess with GPU access.
|
| 8 |
+
- model.generate() is called in a plain background thread (no @spaces.GPU needed —
|
| 9 |
+
the subprocess already has GPU access process-wide).
|
| 10 |
+
- The streamer is never passed across process boundaries, avoiding pickling errors.
|
| 11 |
+
- model_loader loads the model on CPU then moves to CUDA to avoid caching_allocator_warmup
|
| 12 |
+
triggering torch._C._cuda_init() before ZeroGPU can intercept it.
|
| 13 |
+
"""
|
| 14 |
+
|
| 15 |
+
import threading
|
| 16 |
+
from collections.abc import Generator
|
| 17 |
+
from typing import Any
|
| 18 |
+
|
| 19 |
+
import spaces
|
| 20 |
+
import torch
|
| 21 |
+
from PIL import Image
|
| 22 |
+
from transformers import TextIteratorStreamer
|
| 23 |
+
|
| 24 |
+
from model_loader import load_model, load_processor
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
@spaces.GPU(duration=120)
def answer_question_stream(
    image: Image.Image,
    question: str,
    conversation_history: list[dict[str, Any]],
    current_image_path: str | None,  # noqa: ARG001
) -> Generator[str, None, None]:
    """Stream a Granite Vision answer about an image, token by token.

    The whole generator runs inside @spaces.GPU so CUDA is available
    throughout; model.generate() executes on a plain background thread.

    Args:
        image: PIL Image to query.
        question: Question string to ask.
        conversation_history: Prior conversation turns (role/content dicts).
        current_image_path: Unused — kept for API compatibility.

    Yields:
        The answer text accumulated so far, after each streamed token.
    """
    processor, model = load_model()

    if processor is None or model is None:
        yield f"[STUB] Question: {question}\n\nThis is a placeholder response. Model not loaded."
        return

    try:
        image = image.convert("RGB")

        # The image placeholder is attached only on the first turn; follow-up
        # turns rely on the image already present in the history.
        content: list[dict[str, Any]] = [{"type": "text", "text": question}]
        if not conversation_history:
            content.insert(0, {"type": "image"})
        user_turn = {"role": "user", "content": content}

        messages = [*conversation_history, user_turn]
        prompt = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
        model_inputs = processor(text=prompt, images=image, return_tensors="pt").to(model.device)

        streamer = TextIteratorStreamer(processor.tokenizer, skip_prompt=True, skip_special_tokens=True)

        def _run_generation() -> None:
            with torch.inference_mode():
                model.generate(**model_inputs, max_new_tokens=4096, use_cache=True, streamer=streamer)

        worker = threading.Thread(target=_run_generation)
        worker.start()

        partial = ""
        for piece in streamer:
            if piece:
                partial += piece
                yield partial

        worker.join()

    except Exception as e:  # noqa: BLE001
        import traceback
        traceback.print_exc()
        yield f"Error: {e!s}"
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
@spaces.GPU(duration=120)
def answer_question(
    image: Image.Image,
    question: str,
    conversation_history: list[dict[str, Any]],
    current_image_path: str | None,  # noqa: ARG001
) -> tuple[str, list[dict[str, Any]], str | None]:
    """Answer a question about an image using Granite Vision (non-streaming).

    Args:
        image: PIL Image to query.
        question: Question string to ask.
        conversation_history: Prior conversation turns (role/content dicts).
        current_image_path: Unused — kept for API compatibility.

    Returns:
        Tuple of (answer_text, updated_history, None).
    """
    processor, model = load_model()

    if processor is None or model is None:
        return (
            f"[STUB] Question: {question}\n\nThis is a placeholder response. Model not loaded.",
            conversation_history,
            None,
        )

    try:
        image = image.convert("RGB")

        # Attach the image placeholder only on the first turn; later turns
        # reference the image already carried in the history.
        content: list[dict[str, Any]] = [{"type": "text", "text": question}]
        if not conversation_history:
            content.insert(0, {"type": "image"})
        user_turn = {"role": "user", "content": content}

        messages = [*conversation_history, user_turn]
        prompt = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
        model_inputs = processor(text=prompt, images=image, return_tensors="pt").to(model.device)

        token_budget = 4096
        with torch.inference_mode():
            output_ids = model.generate(**model_inputs, max_new_tokens=token_budget, use_cache=True)

        # Decode only the newly generated tokens, not the prompt.
        new_ids = output_ids[0, model_inputs["input_ids"].shape[1]:]
        answer = processor.decode(new_ids, skip_special_tokens=True)
        if len(new_ids) >= token_budget:
            answer += "\n\n[Max token limit reached — response may be truncated]"

        updated_history = [
            *conversation_history,
            user_turn,
            {"role": "assistant", "content": [{"type": "text", "text": answer}]},
        ]

        return answer, updated_history, None

    except Exception as e:  # noqa: BLE001
        import traceback
        traceback.print_exc()
        return f"Error: {e!s}", conversation_history, None
|
src/model_loader.py
ADDED
|
@@ -0,0 +1,87 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Shared Granite Vision model loader.
|
| 2 |
+
|
| 3 |
+
Split into two stages for ZeroGPU compatibility:
|
| 4 |
+
- load_processor(): CPU-only, safe to call at startup or outside @spaces.GPU
|
| 5 |
+
- load_model(): requires CUDA, must only be called inside a @spaces.GPU context
|
| 6 |
+
|
| 7 |
+
The processor and model are cached globally so they are loaded at most once.
|
| 8 |
+
"""
|
| 9 |
+
|
| 10 |
+
import os
|
| 11 |
+
from typing import Any
|
| 12 |
+
|
| 13 |
+
_processor: Any = None
|
| 14 |
+
_model: Any = None
|
| 15 |
+
|
| 16 |
+
MODEL_ID = "ibm-granite/granite-vision-4.1-4b"
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def load_processor() -> Any:
    """Return the cached AutoProcessor, loading it on first use.

    CPU-only work — safe to call at startup or outside @spaces.GPU.

    Returns:
        AutoProcessor instance, or None if loading fails.
    """
    global _processor  # noqa: PLW0603

    # Fast path: already loaded once.
    if _processor is not None:
        return _processor

    try:
        from transformers import AutoProcessor

        hf_token = os.environ.get("HF_TOKEN")
        _processor = AutoProcessor.from_pretrained(
            MODEL_ID, trust_remote_code=True, token=hf_token, use_fast=True
        )
    except Exception as e:  # noqa: BLE001
        import traceback
        print(f"Processor load error: {e}")
        traceback.print_exc()
        return None

    print(f"Processor loaded for {MODEL_ID}")
    return _processor
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
def load_model() -> tuple[Any, Any]:
    """Load (or return cached) model to CUDA. Must be called inside @spaces.GPU.

    The module-level cache is assigned only after the model is fully
    initialized (moved to CUDA and LoRA-merged). Previously the cache was
    assigned before ``.to("cuda")``, so a failure at that point left a
    partially-initialized CPU model cached and silently returned by all
    later calls.

    Returns:
        Tuple of (processor, model); (None, None) if the processor fails to
        load, or (processor, None) if the model fails to load.
    """
    global _model  # noqa: PLW0603

    processor = load_processor()
    if processor is None:
        return None, None

    # Fast path: fully-initialized model already cached.
    if _model is not None:
        return processor, _model

    try:
        import torch
        from transformers import AutoModelForImageTextToText

        token = os.environ.get("HF_TOKEN")
        # Load on CPU first to avoid caching_allocator_warmup triggering
        # torch._C._cuda_init() before ZeroGPU can intercept it.
        model = AutoModelForImageTextToText.from_pretrained(
            MODEL_ID,
            trust_remote_code=True,
            dtype=torch.bfloat16,
            token=token,
        ).eval()
        model = model.to("cuda")

        if hasattr(model, "merge_lora_adapters"):
            model = model.merge_lora_adapters()

        # Publish to the cache only now, when every init step has succeeded.
        _model = model
        print(f"Model loaded: {MODEL_ID} on cuda")
        return processor, _model

    except Exception as e:  # noqa: BLE001
        import traceback
        print(f"Model load error: {e}")
        traceback.print_exc()
        # _model was never assigned, so the next call retries from scratch.
        return processor, None
|
src/pdf_io.py
ADDED
|
@@ -0,0 +1,57 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""PDF I/O utilities for loading and rendering PDFs."""
|
| 2 |
+
|
| 3 |
+
from PIL import Image
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
def load_pdf_pages(pdf_bytes: bytes, dpi: int = 150, max_pages: int = 10) -> list[Image.Image]:
    """Load PDF and render each page to a PIL Image.

    Args:
        pdf_bytes: PDF file as bytes.
        dpi: Resolution for rendering (PyMuPDF renders at 72 dpi by default;
            a scale matrix converts to the requested dpi).
        max_pages: Maximum number of pages to render.

    Returns:
        List of PIL Images, one per page. When PyMuPDF is not installed,
        three independent gray placeholder images are returned.
    """
    try:
        import fitz  # PyMuPDF

        doc = fitz.open(stream=pdf_bytes, filetype="pdf")
        pages = []

        try:
            for i in range(min(len(doc), max_pages)):
                page = doc[i]
                # Matrix scales from PyMuPDF's 72-dpi base to the requested dpi.
                pix = page.get_pixmap(matrix=fitz.Matrix(dpi / 72, dpi / 72))
                img = Image.frombytes("RGB", (pix.width, pix.height), pix.samples)
                pages.append(img)
        finally:
            doc.close()

        return pages

    except ImportError:
        print("PyMuPDF not available, returning placeholder")
        # Build three *distinct* images: the previous `[img] * 3` aliased one
        # object three times, so an in-place edit (e.g. thumbnail()) by a
        # caller would have mutated every entry at once.
        return [Image.new("RGB", (800, 1000), color=(200, 200, 200)) for _ in range(3)]
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
def get_page_count(pdf_bytes: bytes) -> int:
    """Return the total page count of a PDF, or 0 when it cannot be read.

    Args:
        pdf_bytes: PDF file as bytes.

    Returns:
        Total number of pages; 0 if PyMuPDF is unavailable or the bytes
        are not a readable PDF.
    """
    try:
        import fitz

        pdf_doc = fitz.open(stream=pdf_bytes, filetype="pdf")
        try:
            return len(pdf_doc)
        finally:
            pdf_doc.close()
    except Exception:  # noqa: BLE001
        return 0
|
src/themes/carbon.py
ADDED
|
@@ -0,0 +1,147 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""IBM Carbon theme for gradio demos.
|
| 2 |
+
|
| 3 |
+
This version builds on top of the Carbon theme to make it more playful with rounded corners, a larger font family to
|
| 4 |
+
enhance readability, and the IBM Cool Gray color palette for better consistency with other IBM Research demos, such as
|
| 5 |
+
Bee.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
import gradio as gr
|
| 9 |
+
from gradio.themes.utils import sizes
|
| 10 |
+
|
| 11 |
+
theme = gr.themes.Base(
|
| 12 |
+
primary_hue=gr.themes.Color(
|
| 13 |
+
c100="#EDF5FF",
|
| 14 |
+
c200="#D0E2FF",
|
| 15 |
+
c300="#A6C8FF",
|
| 16 |
+
c400="#78A9FF",
|
| 17 |
+
c50="#F9F9FB",
|
| 18 |
+
c500="#4589FF",
|
| 19 |
+
c600="#0F62FE",
|
| 20 |
+
c700="#0043CE",
|
| 21 |
+
c800="#002D9C",
|
| 22 |
+
c900="#001D6C",
|
| 23 |
+
c950="#001141",
|
| 24 |
+
),
|
| 25 |
+
secondary_hue=gr.themes.Color(
|
| 26 |
+
c100="#EDF5FF",
|
| 27 |
+
c200="#D0E2FF",
|
| 28 |
+
c300="#A6C8FF",
|
| 29 |
+
c400="#78A9FF",
|
| 30 |
+
c50="#F9F9FB",
|
| 31 |
+
c500="#4589FF",
|
| 32 |
+
c600="#0F62FE",
|
| 33 |
+
c700="#0043CE",
|
| 34 |
+
c800="#002D9C",
|
| 35 |
+
c900="#001D6C",
|
| 36 |
+
c950="#001141",
|
| 37 |
+
),
|
| 38 |
+
neutral_hue=gr.themes.Color(
|
| 39 |
+
c100="#F2F4F8",
|
| 40 |
+
c200="#DDE1E6",
|
| 41 |
+
c300="#C1C7CD",
|
| 42 |
+
c400="#A2A9B0",
|
| 43 |
+
c50="#F9F9FB",
|
| 44 |
+
c500="#878D96",
|
| 45 |
+
c600="#697077",
|
| 46 |
+
c700="#4D5358",
|
| 47 |
+
c800="#393939",
|
| 48 |
+
c900="#21272A",
|
| 49 |
+
c950="#121619",
|
| 50 |
+
),
|
| 51 |
+
spacing_size=sizes.spacing_md, # change spacing to default size
|
| 52 |
+
radius_size=sizes.radius_md, # change spacing to default size and Keep Radius to make demo feel more playful
|
| 53 |
+
text_size=sizes.text_lg, # change fontsize to default size
|
| 54 |
+
# spacing_size: sizes.Size | str = sizes.spacing_md, #change spacing to default size
|
| 55 |
+
# radius_size: sizes.Size | str = sizes.radius_md, #change spacing to default size and Keep Radius to make
|
| 56 |
+
# demo feel more playful
|
| 57 |
+
# text_size: sizes.Size | str = sizes.text_lg, #change fontsize to default size
|
| 58 |
+
font=["IBM Plex Sans", "ui-sans-serif", "system-ui", "sans-serif"], # update font
|
| 59 |
+
font_mono=["IBM Plex Mono", "ui-monospace", "Consolas", "monospace"], # update font
|
| 60 |
+
).set(
|
| 61 |
+
# Colors
|
| 62 |
+
background_fill_primary="*neutral_100", # Coolgray10 background
|
| 63 |
+
background_fill_primary_dark="*neutral_950", # Coolgray95 background for dark mode
|
| 64 |
+
slider_color="*primary_600", # Blue60
|
| 65 |
+
slider_color_dark="*primary_500", # Blue50
|
| 66 |
+
# Shadows
|
| 67 |
+
shadow_drop="0 1px 4px 0 rgb(0 0 0 / 0.1)",
|
| 68 |
+
shadow_drop_lg="0 2px 5px 0 rgb(0 0 0 / 0.1)",
|
| 69 |
+
# Block Labels
|
| 70 |
+
block_background_fill="white",
|
| 71 |
+
block_label_background_fill="white", # same color as blockback gound fill
|
| 72 |
+
block_label_radius="*radius_md",
|
| 73 |
+
block_label_text_size="*text_md",
|
| 74 |
+
block_label_text_weight="600",
|
| 75 |
+
block_label_text_color="black",
|
| 76 |
+
block_label_text_color_dark="white",
|
| 77 |
+
block_title_radius="*block_label_radius",
|
| 78 |
+
block_title_background_fill="*block_label_background_fill",
|
| 79 |
+
block_title_text_weight="600",
|
| 80 |
+
block_title_text_color="black",
|
| 81 |
+
block_title_text_color_dark="white",
|
| 82 |
+
block_label_margin="*spacing_md",
|
| 83 |
+
# Inputs
|
| 84 |
+
input_background_fill="white",
|
| 85 |
+
input_background_fill_dark="*block-background-fill",
|
| 86 |
+
input_border_color="*neutral_100",
|
| 87 |
+
input_shadow="*shadow_drop",
|
| 88 |
+
input_shadow_focus="*shadow_drop_lg",
|
| 89 |
+
checkbox_shadow="none",
|
| 90 |
+
# Buttons
|
| 91 |
+
shadow_spread="6px",
|
| 92 |
+
button_primary_shadow="*shadow_drop_lg",
|
| 93 |
+
button_primary_shadow_hover="*shadow_drop_lg",
|
| 94 |
+
button_primary_shadow_active="*shadow_inset",
|
| 95 |
+
button_secondary_shadow="*shadow_drop_lg",
|
| 96 |
+
button_secondary_shadow_hover="*shadow_drop_lg",
|
| 97 |
+
button_secondary_shadow_active="*shadow_inset",
|
| 98 |
+
checkbox_label_shadow="*shadow_drop_lg",
|
| 99 |
+
button_primary_background_fill="*primary_600",
|
| 100 |
+
button_primary_background_fill_hover="*primary_500",
|
| 101 |
+
button_primary_background_fill_hover_dark="*primary_500",
|
| 102 |
+
button_primary_text_color="white",
|
| 103 |
+
button_secondary_background_fill="white",
|
| 104 |
+
button_secondary_background_fill_hover="*neutral_100",
|
| 105 |
+
button_secondary_background_fill_dark="*neutral_800", # Secondary cool gray 80
|
| 106 |
+
button_secondary_background_fill_hover_dark="*primary_500",
|
| 107 |
+
button_secondary_text_color="*neutral_800",
|
| 108 |
+
button_cancel_background_fill="*button_secondary_background_fill",
|
| 109 |
+
button_cancel_background_fill_hover="*button_secondary_background_fill_hover",
|
| 110 |
+
button_cancel_background_fill_hover_dark="*button_secondary_background_fill_hover",
|
| 111 |
+
button_cancel_text_color="*button_secondary_text_color",
|
| 112 |
+
checkbox_label_background_fill_selected="*primary_200",
|
| 113 |
+
checkbox_label_background_fill_selected_dark="*primary_500",
|
| 114 |
+
checkbox_border_width="1px",
|
| 115 |
+
checkbox_border_color="*neutral_200",
|
| 116 |
+
checkbox_background_color_dark="*neutral_700", # Jan 18 test to fix checkbox, radio button background color
|
| 117 |
+
checkbox_background_color_selected="*primary_600",
|
| 118 |
+
checkbox_background_color_selected_dark="*primary_500",
|
| 119 |
+
checkbox_border_color_focus="*primary_600",
|
| 120 |
+
checkbox_border_color_focus_dark="*primary_500",
|
| 121 |
+
checkbox_border_color_selected="*primary_600",
|
| 122 |
+
checkbox_border_color_selected_dark="*primary_500",
|
| 123 |
+
checkbox_label_text_color_selected="black",
|
| 124 |
+
# Borders
|
| 125 |
+
block_border_width="1px", # test example border
|
| 126 |
+
panel_border_width="1px",
|
| 127 |
+
# Chatbubble related colors
|
| 128 |
+
# light
|
| 129 |
+
# color_accent = "*secondary_400",
|
| 130 |
+
border_color_accent_subdued="*color_accent_soft", # chatbubble human border color, use Blue 20 as an accent color
|
| 131 |
+
color_accent_soft="*secondary_200", # chatbubble human color
|
| 132 |
+
# darkmode
|
| 133 |
+
# chatbubble human border color in darkmode, use Blue 20 as an accent color
|
| 134 |
+
border_color_accent_subdued_dark="*secondary_500",
|
| 135 |
+
color_accent_soft_dark="*secondary_500", # chatbubble human color in dark mode
|
| 136 |
+
# Chatbot related font
|
| 137 |
+
chatbot_text_size="*text_md", # make it larger
|
| 138 |
+
# additional dark mode related tweaks:
|
| 139 |
+
# block_background_fill_dark="*neutral_950", # Jan 18 test coolgray95 background for dark mode
|
| 140 |
+
block_label_background_fill_dark="*neutral_800", # same color as blockback gound fill
|
| 141 |
+
block_title_background_fill_dark="*block_label_background_fill",
|
| 142 |
+
# input_background_fill_dark="*neutral_800", #This attribute help match fill color cool gray 80 to match background
|
| 143 |
+
# however cause the problem for the general theme.
|
| 144 |
+
# input_shadow_dark="*shadow_drop", #Test if it could make the border without the color
|
| 145 |
+
# input_border_color_dark="*neutral_200",#add attribute for border Jan 18
|
| 146 |
+
checkbox_border_color_dark="*neutral_600", # Jan 18 test to fix border
|
| 147 |
+
)
|
src/themes/research_monochrome.py
ADDED
|
@@ -0,0 +1,152 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""IBM Research Monochrome theme for gradio demos.
|
| 2 |
+
|
| 3 |
+
This version is a variation of CarbonSoft style, where the primary button is dark gray to create monochrome style. This
|
| 4 |
+
version uses the style from Research demos such as Bee to make it more playful with rounded corners, a larger font
|
| 5 |
+
family to enhance readability, and the IBM Cool Gray color palette for better consistency with other IBM Research demos,
|
| 6 |
+
such as Bee.
|
| 7 |
+
"""
|
| 8 |
+
|
| 9 |
+
import gradio as gr
|
| 10 |
+
from gradio.themes.utils import sizes
|
| 11 |
+
|
| 12 |
+
theme = gr.themes.Base(
|
| 13 |
+
primary_hue=gr.themes.Color(
|
| 14 |
+
c100="#EDF5FF",
|
| 15 |
+
c200="#D0E2FF",
|
| 16 |
+
c300="#A6C8FF",
|
| 17 |
+
c400="#78A9FF",
|
| 18 |
+
c50="#F9F9FB",
|
| 19 |
+
c500="#4589FF",
|
| 20 |
+
c600="#0F62FE",
|
| 21 |
+
c700="#0043CE",
|
| 22 |
+
c800="#002D9C",
|
| 23 |
+
c900="#001D6C",
|
| 24 |
+
c950="#001141",
|
| 25 |
+
),
|
| 26 |
+
secondary_hue=gr.themes.Color(
|
| 27 |
+
c100="#EDF5FF",
|
| 28 |
+
c200="#D0E2FF",
|
| 29 |
+
c300="#A6C8FF",
|
| 30 |
+
c400="#78A9FF",
|
| 31 |
+
c50="#F9F9FB",
|
| 32 |
+
c500="#4589FF",
|
| 33 |
+
c600="#0F62FE",
|
| 34 |
+
c700="#0043CE",
|
| 35 |
+
c800="#002D9C",
|
| 36 |
+
c900="#001D6C",
|
| 37 |
+
c950="#001141",
|
| 38 |
+
),
|
| 39 |
+
neutral_hue=gr.themes.Color(
|
| 40 |
+
c100="#F2F4F8",
|
| 41 |
+
c200="#DDE1E6",
|
| 42 |
+
c300="#C1C7CD",
|
| 43 |
+
c400="#A2A9B0",
|
| 44 |
+
c50="#F9F9FB",
|
| 45 |
+
c500="#878D96",
|
| 46 |
+
c600="#697077",
|
| 47 |
+
c700="#4D5358",
|
| 48 |
+
c800="#393939",
|
| 49 |
+
c900="#21272A",
|
| 50 |
+
c950="#121619",
|
| 51 |
+
),
|
| 52 |
+
spacing_size=sizes.spacing_md, # change spacing to default size
|
| 53 |
+
radius_size=sizes.radius_md, # change spacing to default size and Keep Radius to make demo feel more playful
|
| 54 |
+
text_size=sizes.text_md, # change fontsize to default size
|
| 55 |
+
# spacing_size: sizes.Size | str = sizes.spacing_md, #change spacing to default size
|
| 56 |
+
# radius_size: sizes.Size | str = sizes.radius_md, #change spacing to default size and Keep Radius to make
|
| 57 |
+
# demo feel more playful
|
| 58 |
+
# text_size: sizes.Size | str = sizes.text_lg, #change fontsize to default size
|
| 59 |
+
font=["IBM Plex Sans", "ui-sans-serif", "system-ui", "sans-serif"], # update font
|
| 60 |
+
font_mono=["IBM Plex Mono", "ui-monospace", "Consolas", "monospace"], # update font
|
| 61 |
+
).set(
|
| 62 |
+
# Colors
|
| 63 |
+
background_fill_primary="*neutral_100", # Coolgray10 background
|
| 64 |
+
background_fill_primary_dark="*neutral_950", # Coolgray95 background for dark mode
|
| 65 |
+
# Change blue to black to create monochrome style
|
| 66 |
+
slider_color="*neutral_900",
|
| 67 |
+
slider_color_dark="*primary_500",
|
| 68 |
+
# Shadows
|
| 69 |
+
shadow_drop="0 1px 4px 0 rgb(0 0 0 / 0.1)",
|
| 70 |
+
shadow_drop_lg="0 2px 5px 0 rgb(0 0 0 / 0.1)",
|
| 71 |
+
# Block Labels
|
| 72 |
+
block_background_fill="white",
|
| 73 |
+
block_label_background_fill="white", # same color as blockback gound fill
|
| 74 |
+
block_label_radius="*radius_md",
|
| 75 |
+
block_label_text_size="*text_md",
|
| 76 |
+
block_label_text_weight="600",
|
| 77 |
+
block_label_text_color="black",
|
| 78 |
+
block_label_text_color_dark="white",
|
| 79 |
+
block_title_radius="*block_label_radius",
|
| 80 |
+
block_title_background_fill="*block_label_background_fill",
|
| 81 |
+
block_title_text_weight="400",
|
| 82 |
+
block_title_text_color="black",
|
| 83 |
+
block_title_text_color_dark="white",
|
| 84 |
+
block_label_margin="*spacing_md",
|
| 85 |
+
# Inputs
|
| 86 |
+
input_background_fill="white",
|
| 87 |
+
input_background_fill_dark="*block-background-fill",
|
| 88 |
+
input_border_color="*neutral_100",
|
| 89 |
+
input_shadow="*shadow_drop",
|
| 90 |
+
input_shadow_dark="0 1px 4px #000",
|
| 91 |
+
input_shadow_focus="*shadow_drop_lg",
|
| 92 |
+
checkbox_shadow="none",
|
| 93 |
+
# Buttons
|
| 94 |
+
shadow_spread="6px",
|
| 95 |
+
button_primary_shadow="*shadow_drop_lg",
|
| 96 |
+
button_primary_shadow_hover="*shadow_drop_lg",
|
| 97 |
+
button_primary_shadow_active="*shadow_inset",
|
| 98 |
+
button_secondary_shadow="*shadow_drop_lg",
|
| 99 |
+
button_secondary_shadow_hover="*shadow_drop_lg",
|
| 100 |
+
button_secondary_shadow_active="*shadow_inset",
|
| 101 |
+
checkbox_label_shadow="*shadow_drop_lg",
|
| 102 |
+
# Change blue to black to create monochrome style
|
| 103 |
+
button_primary_background_fill="*neutral_900",
|
| 104 |
+
button_primary_background_fill_dark="*neutral_600",
|
| 105 |
+
button_primary_background_fill_hover="*neutral_700",
|
| 106 |
+
button_primary_background_fill_hover_dark="*primary_500", # hover to be blue
|
| 107 |
+
button_primary_text_color="white",
|
| 108 |
+
button_secondary_background_fill="white",
|
| 109 |
+
button_secondary_background_fill_hover="*neutral_100",
|
| 110 |
+
button_secondary_background_fill_dark="*neutral_800", # Secondary cool gray 80
|
| 111 |
+
button_secondary_background_fill_hover_dark="*primary_500",
|
| 112 |
+
button_secondary_text_color="*neutral_800",
|
| 113 |
+
button_cancel_background_fill="*button_secondary_background_fill",
|
| 114 |
+
button_cancel_background_fill_hover="*button_secondary_background_fill_hover",
|
| 115 |
+
button_cancel_background_fill_hover_dark="*button_secondary_background_fill_hover",
|
| 116 |
+
button_cancel_text_color="*button_secondary_text_color",
|
| 117 |
+
checkbox_label_background_fill_selected="*primary_200",
|
| 118 |
+
checkbox_label_background_fill_selected_dark="*primary_500",
|
| 119 |
+
checkbox_border_width="1px",
|
| 120 |
+
checkbox_border_color="*neutral_200",
|
| 121 |
+
checkbox_background_color_dark="*neutral_700", # Jan 18 test to fix checkbox, radio button background color
|
| 122 |
+
checkbox_background_color_selected="*primary_600",
|
| 123 |
+
checkbox_background_color_selected_dark="*primary_500",
|
| 124 |
+
checkbox_border_color_focus="*primary_600",
|
| 125 |
+
checkbox_border_color_focus_dark="*primary_500",
|
| 126 |
+
checkbox_border_color_selected="*primary_600",
|
| 127 |
+
checkbox_border_color_selected_dark="*primary_500",
|
| 128 |
+
checkbox_label_text_color_selected="black",
|
| 129 |
+
# Borders
|
| 130 |
+
block_border_width="1px", # test example border
|
| 131 |
+
panel_border_width="1px",
|
| 132 |
+
# Chatbubble related colors
|
| 133 |
+
# light
|
| 134 |
+
# color_accent = "*secondary_400",
|
| 135 |
+
border_color_accent_subdued="*color_accent_soft", # chatbubble human border color, use Blue 20 as an accent color
|
| 136 |
+
color_accent_soft="*secondary_200", # chatbubble human color
|
| 137 |
+
# darkmode
|
| 138 |
+
# chatbubble human border color in darkmode, use Blue 20 as an accent color
|
| 139 |
+
border_color_accent_subdued_dark="*secondary_500",
|
| 140 |
+
color_accent_soft_dark="*secondary_500", # chatbubble human color in dark mode
|
| 141 |
+
# Chatbot related font
|
| 142 |
+
chatbot_text_size="*text_md", # make it larger
|
| 143 |
+
# additional dark mode related tweaks:
|
| 144 |
+
# block_background_fill_dark="*neutral_950", # Jan 18 test coolgray95 background for dark mode
|
| 145 |
+
block_label_background_fill_dark="*neutral_800", # same color as blockback gound fill
|
| 146 |
+
block_title_background_fill_dark="*block_label_background_fill",
|
| 147 |
+
# input_background_fill_dark="*neutral_800", #This attribute help match fill color cool gray 80 to match background
|
| 148 |
+
# however cause the problem for the general theme.
|
| 149 |
+
# input_shadow_dark="*shadow_drop", #Test if it could make the border without the color
|
| 150 |
+
# input_border_color_dark="*neutral_200",#add attribute for border Jan 18
|
| 151 |
+
checkbox_border_color_dark="*neutral_600", # Jan 18 test to fix border
|
| 152 |
+
)
|
src/ui_state.py
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""UI state management for the document intelligence demo.
|
| 2 |
+
|
| 3 |
+
Per-session state via factory function + shared caches.
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import hashlib
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def create_initial_state() -> dict:
    """Build a brand-new per-session state mapping with default values.

    Every call constructs the nested containers from scratch, so each
    session gets independent mutable state (no sharing between sessions).

    Returns:
        A state dict with every field reset to its default.
    """
    # Ordered (key, default) pairs; built inside the call so the nested
    # dict/list defaults are fresh objects on every invocation.
    defaults = [
        ("uploaded_file_hash", None),
        ("uploaded_file_bytes", None),
        ("parsed_result", {}),
        ("page_images", []),
        ("figures_info", []),
        ("selected_figure", None),
        ("last_csv", None),
        ("current_figure_index", 0),
        ("conversation_history", []),
        ("current_image_path", None),
    ]
    return dict(defaults)
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
# Module-level shared caches (keyed by content hash, safe to share across sessions)
# NOTE(review): both caches grow unboundedly for the process lifetime — confirm
# that is acceptable for long-running deployments.
parse_cache: dict[str, dict] = {}  # content hash -> parse result (presumably document parser output)
page_cache: dict[str, list] = {}  # content hash -> per-page entries (presumably rendered page images)
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def hash_bytes(data: bytes) -> str:
    """Compute the SHA-256 digest of ``data`` as a hex string.

    Args:
        data: Raw bytes to digest.

    Returns:
        64-character lowercase hexadecimal SHA-256 digest.
    """
    digest = hashlib.sha256()
    digest.update(data)
    return digest.hexdigest()
|