Vizan Deployer commited on
Commit
1b12df6
·
0 Parent(s):

Initial Release for Hugging Face

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .github/ISSUE_TEMPLATE/bug-report.yml +62 -0
  2. .github/ISSUE_TEMPLATE/config.yml +11 -0
  3. .github/dependabot.yml +13 -0
  4. .github/workflows/build_publish_develop_docs.yml +31 -0
  5. .github/workflows/build_publish_release_docs.yml +31 -0
  6. .github/workflows/close_inactive_issues.yaml +23 -0
  7. .github/workflows/codestyle.yml +36 -0
  8. .github/workflows/link-check.yml +35 -0
  9. .github/workflows/python-publish.yml +39 -0
  10. .github/workflows/test_gpu.yml +73 -0
  11. .github/workflows/tests.yaml +53 -0
  12. .gitignore +43 -0
  13. .lycheeignore +5 -0
  14. .pre-commit-config.yaml +45 -0
  15. .streamlit/config.toml +2 -0
  16. .style.yapf +3 -0
  17. CNAME +1 -0
  18. Dockerfile +22 -0
  19. LICENSE +201 -0
  20. MANIFEST.in +22 -0
  21. README.md +546 -0
  22. applications/README.md +1 -0
  23. awesome_projects.md +30 -0
  24. configs/cls/ch_PP-OCRv3/ch_PP-OCRv3_rotnet.yml +98 -0
  25. configs/cls/cls_mv3.yml +94 -0
  26. configs/det/PP-OCRv3/PP-OCRv3_det_cml.yml +226 -0
  27. configs/det/PP-OCRv3/PP-OCRv3_det_dml.yml +174 -0
  28. configs/det/PP-OCRv3/PP-OCRv3_mobile_det.yml +165 -0
  29. configs/det/PP-OCRv3/PP-OCRv3_server_det.yml +160 -0
  30. configs/det/PP-OCRv4/PP-OCRv4_det_cml.yml +240 -0
  31. configs/det/PP-OCRv4/PP-OCRv4_mobile_det.yml +174 -0
  32. configs/det/PP-OCRv4/PP-OCRv4_mobile_seal_det.yml +171 -0
  33. configs/det/PP-OCRv4/PP-OCRv4_server_det.yml +175 -0
  34. configs/det/PP-OCRv4/PP-OCRv4_server_seal_det.yml +171 -0
  35. configs/det/PP-OCRv5/PP-OCRv5_mobile_det.yml +174 -0
  36. configs/det/PP-OCRv5/PP-OCRv5_server_det.yml +173 -0
  37. configs/det/ch_PP-OCRv2/ch_PP-OCRv2_det_cml.yml +206 -0
  38. configs/det/ch_PP-OCRv2/ch_PP-OCRv2_det_distill.yml +175 -0
  39. configs/det/ch_PP-OCRv2/ch_PP-OCRv2_det_dml.yml +178 -0
  40. configs/det/ch_PP-OCRv2/ch_PP-OCRv2_det_student.yml +132 -0
  41. configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml +132 -0
  42. configs/det/ch_ppocr_v2.0/ch_det_res18_db_v2.0.yml +131 -0
  43. configs/det/det_mv3_db.yml +133 -0
  44. configs/det/det_mv3_east.yml +109 -0
  45. configs/det/det_mv3_pse.yml +135 -0
  46. configs/det/det_r18_vd_ct.yml +107 -0
  47. configs/det/det_r50_db++_icdar15.yml +164 -0
  48. configs/det/det_r50_db++_td_tr.yml +167 -0
  49. configs/det/det_r50_drrg_ctw.yml +133 -0
  50. configs/det/det_r50_vd_db.yml +128 -0
.github/ISSUE_TEMPLATE/bug-report.yml ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # PaddleOCR
2
+
3
+ name: 🐛 Bug Report
4
+ description: Problems with PaddleOCR
5
+ body:
6
+ - type: markdown
7
+ attributes:
8
+ value: |
9
+ Thank you for submitting a PaddleOCR 🐛 Bug Report!
10
+
11
+ - type: checkboxes
12
+ attributes:
13
+ label: 🔎 Search before asking
14
+ description: >
15
+ Please search the PaddleOCR [Docs](https://paddlepaddle.github.io/PaddleOCR/), [Issues](https://github.com/PaddlePaddle/PaddleOCR/issues) and [Discussions](https://github.com/PaddlePaddle/PaddleOCR/discussions) to see if a similar bug report already exists.
16
+ options:
17
+ - label: I have searched the PaddleOCR [Docs](https://paddlepaddle.github.io/PaddleOCR/) and found no similar bug report.
18
+ required: true
19
+ - label: I have searched the PaddleOCR [Issues](https://github.com/PaddlePaddle/PaddleOCR/issues) and found no similar bug report.
20
+ required: true
21
+ - label: I have searched the PaddleOCR [Discussions](https://github.com/PaddlePaddle/PaddleOCR/discussions) and found no similar bug report.
22
+ required: true
23
+
24
+ - type: textarea
25
+ attributes:
26
+ label: 🐛 Bug (问题描述)
27
+ description: Provide console output with error messages and/or screenshots of the bug. (请提供详细报错信息或者截图)
28
+ placeholder: |
29
+ 💡 ProTip! Include as much information as possible (screenshots, logs, tracebacks etc.) to receive the most helpful response.
30
+ validations:
31
+ required: true
32
+
33
+ - type: textarea
34
+ attributes:
35
+ label: 🏃‍♂️ Environment (运行环境)
36
+ description: Please specify the software and hardware you used to produce the bug. (请给出详细依赖包信息,便于复现问题)
37
+ placeholder: |
38
+ ```bash
39
+ OS macOS-13.5.2
40
+ Environment Jupyter
41
+ Python 3.11.2
42
+ PaddleOCR 2.8.1
43
+ Install git
44
+ RAM 16.00 GB
45
+ CPU Apple M2
46
+ CUDA None
47
+ ```
48
+ validations:
49
+ required: true
50
+
51
+ - type: textarea
52
+ attributes:
53
+ label: 🌰 Minimal Reproducible Example (最小可复现问题的Demo)
54
+ description: >
55
+ When asking a question, people will be better able to provide help if you provide code that they can easily understand and use to **reproduce** the problem.
56
+ This is referred to by community members as creating a [minimal reproducible example](https://stackoverflow.com/help/minimal-reproducible-example). (请务必提供该Demo,这样节省大家时间)
57
+ placeholder: |
58
+ ```bash
59
+ # Code to reproduce your issue here
60
+ ```
61
+ validations:
62
+ required: true
.github/ISSUE_TEMPLATE/config.yml ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ blank_issues_enabled: false
2
+ contact_links:
3
+ - name: 🙏 Q&A
4
+ url: https://github.com/PaddlePaddle/PaddleOCR/discussions/categories/q-a
5
+ about: Ask the community for help
6
+ - name: 💡 Feature requests and ideas
7
+ url: https://github.com/PaddlePaddle/PaddleOCR/discussions/categories/ideas
8
+ about: Share ideas for new features
9
+ - name: 🙌 Show and tell
10
+ url: https://github.com/PaddlePaddle/PaddleOCR/discussions/categories/show-and-tell
11
+ about: Show off something you've made
.github/dependabot.yml ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Keep GitHub Actions up to date with GitHub's Dependabot...
2
+ # https://docs.github.com/en/code-security/dependabot/working-with-dependabot/keeping-your-actions-up-to-date-with-dependabot
3
+ # https://docs.github.com/en/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file#package-ecosystem
4
+ version: 2
5
+ updates:
6
+ - package-ecosystem: github-actions
7
+ directory: /
8
+ groups:
9
+ github-actions:
10
+ patterns:
11
+ - "*" # Group all Actions updates into a single larger pull request
12
+ schedule:
13
+ interval: weekly
.github/workflows/build_publish_develop_docs.yml ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Build/Publish Develop Docs
2
+ on:
3
+ push:
4
+ branches:
5
+ - master
6
+ - main
7
+ permissions:
8
+ contents: write
9
+ jobs:
10
+ deploy:
11
+ runs-on: ubuntu-latest
12
+ steps:
13
+ - uses: actions/checkout@v4
14
+ - name: Configure Git Credentials
15
+ run: |
16
+ git config user.name github-actions[bot]
17
+ git config user.email 41898282+github-actions[bot]@users.noreply.github.com
18
+ - uses: actions/setup-python@v5
19
+ with:
20
+ python-version: 3.x
21
+ - run: echo "cache_id=$(date --utc '+%V')" >> $GITHUB_ENV
22
+ - uses: actions/cache@v4
23
+ with:
24
+ key: mkdocs-material-${{ env.cache_id }}
25
+ path: .cache
26
+ restore-keys: |
27
+ mkdocs-material-
28
+ - run: pip install mike mkdocs-material jieba mkdocs-git-revision-date-localized-plugin mkdocs-git-committers-plugin-2 mkdocs-static-i18n markdown-callouts
29
+ - run: |
30
+ git fetch origin gh-pages --depth=1
31
+ mike deploy --push --update-aliases main latest
.github/workflows/build_publish_release_docs.yml ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Build/Publish Release Docs
2
+ on:
3
+ push:
4
+ tags:
5
+ - v*
6
+
7
+ permissions:
8
+ contents: write
9
+ jobs:
10
+ deploy:
11
+ runs-on: ubuntu-latest
12
+ steps:
13
+ - uses: actions/checkout@v4
14
+ - name: Configure Git Credentials
15
+ run: |
16
+ git config user.name github-actions[bot]
17
+ git config user.email github-actions[bot]@users.noreply.github.com
18
+ - uses: actions/setup-python@v5
19
+ with:
20
+ python-version: 3.x
21
+ - run: echo "cache_id=$(date --utc '+%V')" >> $GITHUB_ENV
22
+ - uses: actions/cache@v4
23
+ with:
24
+ key: mkdocs-material-${{ env.cache_id }}
25
+ path: .cache
26
+ restore-keys: |
27
+ mkdocs-material-
28
+ - run: pip install mike mkdocs-material jieba mkdocs-git-revision-date-localized-plugin mkdocs-git-committers-plugin-2 mkdocs-static-i18n
29
+ - run: |
30
+ git fetch origin gh-pages --depth=1
31
+ mike deploy --push "${{ github.ref_name }}"
.github/workflows/close_inactive_issues.yaml ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Close inactive issues
2
+ on:
3
+ schedule:
4
+ - cron: "30 1 * * *"
5
+
6
+ jobs:
7
+ close-issues:
8
+ runs-on: ubuntu-latest
9
+ permissions:
10
+ issues: write
11
+ pull-requests: write
12
+ steps:
13
+ - uses: actions/stale@v9
14
+ with:
15
+ days-before-issue-stale: 90
16
+ days-before-issue-close: 14
17
+ stale-issue-label: "stale"
18
+ stale-issue-message: "This issue is stale because it has been open for 90 days with no activity."
19
+ close-issue-message: "This issue was closed because it has been inactive for 14 days since being marked as stale."
20
+ days-before-pr-stale: -1
21
+ days-before-pr-close: -1
22
+ repo-token: ${{ secrets.GITHUB_TOKEN }}
23
+
.github/workflows/codestyle.yml ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: PaddleOCR Code Style Check
2
+
3
+ on:
4
+ pull_request:
5
+ push:
6
+ branches: ['main', 'release/*']
7
+
8
+ jobs:
9
+ check-code-style:
10
+ runs-on: ubuntu-latest
11
+
12
+ steps:
13
+ - uses: actions/checkout@v4
14
+ with:
15
+ ref: ${{ github.ref }}
16
+
17
+ - uses: actions/setup-python@v5
18
+ with:
19
+ python-version: '3.10'
20
+
21
+ - name: Cache Python dependencies
22
+ uses: actions/cache@v4
23
+ with:
24
+ path: ~/.cache/pip
25
+ key: ${{ runner.os }}-pip-${{ hashFiles('**/requirements.txt') }}
26
+ restore-keys: |
27
+ ${{ runner.os }}-pip-
28
+
29
+ - name: Install Dependencies for Python
30
+ run: |
31
+ python -m pip install --upgrade pip
32
+ pip install "clang-format==13.0.0"
33
+
34
+ - uses: pre-commit/action@v3.0.1
35
+ with:
36
+ extra_args: '--all-files'
.github/workflows/link-check.yml ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Link Checker
2
+
3
+ on:
4
+ repository_dispatch:
5
+ push:
6
+ branches:
7
+ - main
8
+ workflow_dispatch:
9
+ schedule:
10
+ - cron: "00 18 * * 6"
11
+
12
+ jobs:
13
+ linkChecker:
14
+ runs-on: ubuntu-latest
15
+ steps:
16
+ - uses: actions/checkout@v3
17
+
18
+ - name: Link Checker
19
+ id: lychee
20
+ uses: lycheeverse/lychee-action@v2
21
+ env:
22
+ GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}}
23
+ with:
24
+ args: --exclude 'docs/index/*.md' 'docs/update/*.md' --verbose --no-progress --max-redirects 8 'docs/**/*.md'
25
+ format: markdown
26
+ fail: false
27
+ output: lychee/results.md
28
+
29
+ - name: Create Issue From File
30
+ if: steps.lychee.outputs.exit_code != 0
31
+ uses: peter-evans/create-issue-from-file@v5
32
+ with:
33
+ title: Link Checker Report
34
+ content-filepath: ./lychee/results.md
35
+ labels: report, automated issue
.github/workflows/python-publish.yml ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # This workflow will upload a Python Package using Twine when a release is created
2
+ # For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python#publishing-to-package-registries
3
+
4
+ # This workflow uses actions that are not certified by GitHub.
5
+ # They are provided by a third-party and are governed by
6
+ # separate terms of service, privacy policy, and support
7
+ # documentation.
8
+
9
+ name: Upload Python Package
10
+
11
+ on:
12
+ release:
13
+ types: [published]
14
+
15
+ permissions:
16
+ contents: read
17
+
18
+ jobs:
19
+ deploy:
20
+
21
+ runs-on: ubuntu-latest
22
+
23
+ steps:
24
+ - uses: actions/checkout@v4
25
+ - name: Set up Python
26
+ uses: actions/setup-python@v5
27
+ with:
28
+ python-version: '3.x'
29
+ - name: Install dependencies
30
+ run: |
31
+ python -m pip install --upgrade pip
32
+ pip install build==1.2.2
33
+ - name: Build package
34
+ run: python -m build
35
+ - name: Publish package
36
+ uses: pypa/gh-action-pypi-publish@76f52bc884231f62b9a034ebfe128415bbaabdfc
37
+ with:
38
+ user: __token__
39
+ password: ${{ secrets.PYPI_API_TOKEN }}
.github/workflows/test_gpu.yml ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: PaddleOCR PR Tests GPU
2
+
3
+ on:
4
+ push:
5
+ branches: ["main"]
6
+ paths-ignore:
7
+ - '**.md'
8
+ - '**.txt'
9
+ - '**.yml'
10
+ - '**.yaml'
11
+ pull_request:
12
+ branches: ["main"]
13
+ paths-ignore:
14
+ - '**.md'
15
+ - '**.txt'
16
+ - '**.yml'
17
+ - '**.yaml'
18
+ workflow_dispatch:
19
+ env:
20
+ PR_ID: ${{ github.event.pull_request.number }}
21
+ COMMIT_ID: ${{ github.event.pull_request.head.sha }}
22
+ work_dir: /workspace/PaddleOCR
23
+ PADDLENLP_ROOT: /workspace/PaddleOCR
24
+ TASK: paddleocr-CI-${{ github.event.pull_request.number }}
25
+ BRANCH: ${{ github.event.pull_request.base.ref }}
26
+ AGILE_COMPILE_BRANCH: ${{ github.event.pull_request.base.ref }}
27
+ DIR_NAME: ${{ github.repository }}
28
+ permissions:
29
+ contents: read
30
+
31
+ jobs:
32
+ test-pr-gpu:
33
+ runs-on: [self-hosted, GPU-2Card-OCR]
34
+ steps:
35
+ - name: run test
36
+ env:
37
+ py_version: "3.10"
38
+ paddle_whl: https://paddle-qa.bj.bcebos.com/paddle-pipeline/Develop-GpuSome-LinuxCentos-Gcc82-Cuda118-Cudnn86-Trt85-Py310-CINN-Compile/latest/paddlepaddle_gpu-0.0.0-cp310-cp310-linux_x86_64.whl
39
+ docker_image: ccr-2vdh3abv-pub.cnc.bj.baidubce.com/paddlepaddle/paddle:latest-dev-cuda11.8-cudnn8.6-trt8.5-gcc82
40
+ run: |
41
+ work_dir=$RANDOM
42
+ mkdir $work_dir
43
+ cd $work_dir
44
+ git clone --depth=1 https://github.com/PaddlePaddle/PaddleOCR.git -b main
45
+ cd PaddleOCR
46
+ git fetch origin pull/${PR_ID}/head:ci_build
47
+ git checkout ci_build
48
+ docker run --gpus all --rm -i --name PaddleOCR_CI_$RANDOM \
49
+ --shm-size=128g --net=host \
50
+ -v $PWD:/workspace -w /workspace \
51
+ -e "py_version=${py_version}" \
52
+ -e "paddle_whl=${paddle_whl}" \
53
+ ${docker_image} /bin/bash -c '
54
+ ldconfig;
55
+ nvidia-smi
56
+ df -hl
57
+
58
+ echo ${py_version}
59
+ rm -rf run_env
60
+ mkdir run_env
61
+ ln -s $(which python${py_version}) run_env/python
62
+ ln -s $(which python${py_version}) run_env/python3
63
+ ln -s $(which pip${py_version}) run_env/pip
64
+ export PATH=$PWD/run_env:${PATH}
65
+
66
+ python -m pip install paddlepaddle-gpu==3.1.0 -i https://www.paddlepaddle.org.cn/packages/stable/cu118/
67
+ python -c "import paddle; paddle.version.show()"
68
+ python -m pip config set global.index-url https://pypi.tuna.tsinghua.edu.cn/simple
69
+ python -m pip install pytest
70
+ if [ -f requirements.txt ]; then python -m pip install -r requirements.txt; fi
71
+ python -m pip install -e ".[all]"
72
+ python -m pytest --verbose tests/
73
+ '
.github/workflows/tests.yaml ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: PaddleOCR PR Tests
2
+
3
+ on:
4
+ push:
5
+ branches: ["main", "release/*"]
6
+ paths-ignore:
7
+ - '**.md'
8
+ - '**.txt'
9
+ - '**.yml'
10
+ - '**.yaml'
11
+ pull_request:
12
+ branches: ["main", "release/*"]
13
+ paths-ignore:
14
+ - '**.md'
15
+ - '**.txt'
16
+ - '**.yml'
17
+ - '**.yaml'
18
+
19
+ permissions:
20
+ contents: read
21
+
22
+ jobs:
23
+ test-pr:
24
+ runs-on: ubuntu-latest
25
+
26
+ steps:
27
+ - uses: actions/checkout@v4
28
+ - name: Set up Python 3.10
29
+ uses: actions/setup-python@v5
30
+ with:
31
+ python-version: "3.10"
32
+
33
+ - name: Cache dependencies
34
+ uses: actions/cache@v4
35
+ with:
36
+ path: |
37
+ ~/.cache/pip
38
+ ~/.local/lib/python3.10/site-packages
39
+ ~/.paddleocr/
40
+ key: ${{ runner.os }}-dependencies-${{ hashFiles('**/requirements.txt', 'pyproject.toml') }}
41
+ restore-keys: |
42
+ ${{ runner.os }}-dependencies-
43
+
44
+ - name: Install dependencies
45
+ run: |
46
+ python -m pip install --upgrade pip
47
+ pip install pytest
48
+ if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
49
+ python -m pip install paddlepaddle==3.2.0 -i https://www.paddlepaddle.org.cn/packages/stable/cpu/
50
+ python -m pip install -e '.[all]' 'paddlex@git+https://github.com/PaddlePaddle/PaddleX.git@develop'
51
+ - name: Test with pytest
52
+ run: |
53
+ pytest --verbose tests/
.gitignore ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Byte-compiled / optimized / DLL files
2
+ __pycache__/
3
+ .ipynb_checkpoints/
4
+ *.py[cod]
5
+ *$py.class
6
+
7
+ # C extensions
8
+ *.so
9
+
10
+ inference/
11
+ inference_results/
12
+ output/
13
+ train_data/
14
+ log/
15
+ *.DS_Store
16
+ *.vs
17
+ *.user
18
+ *~
19
+ *.vscode
20
+ *.idea
21
+
22
+ *.log
23
+ .clang-format
24
+ .clang_format.hook
25
+
26
+ build/
27
+ dist/
28
+ *.egg-info/
29
+ /deploy/android_demo/app/OpenCV/
30
+ /deploy/android_demo/app/PaddleLite/
31
+ /deploy/android_demo/app/.cxx/
32
+ /deploy/android_demo/app/cache/
33
+ test_tipc/web/models/
34
+ test_tipc/web/node_modules/
35
+ deploy/
36
+ ppocr/data/imaug/
37
+ *.jpg
38
+ *.png
39
+ *.jpeg
40
+ deploy/
41
+ ppocr/data/imaug/
42
+ deploy/
43
+ ppocr/data/imaug/
.lycheeignore ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ zhuanlan.zhihu.com/*
2
+ https://demo.doctrp.top/
3
+ http://127.0.0.1:8001/
4
+ http://localhost:9003
5
+ https://rrc.cvc.uab.es/
.pre-commit-config.yaml ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ repos:
2
+ - repo: https://github.com/pre-commit/pre-commit-hooks
3
+ rev: v5.0.0
4
+ hooks:
5
+ - id: check-added-large-files
6
+ args: ['--maxkb=512']
7
+ - id: check-case-conflict
8
+ - id: check-merge-conflict
9
+ - id: check-symlinks
10
+ - id: detect-private-key
11
+ - id: end-of-file-fixer
12
+ - id: trailing-whitespace
13
+ files: \.(c|cc|cxx|cpp|cu|h|hpp|hxx|py)$
14
+ - repo: https://github.com/Lucas-C/pre-commit-hooks
15
+ rev: v1.5.5
16
+ hooks:
17
+ - id: remove-crlf
18
+ - id: remove-tabs
19
+ files: \.(c|cc|cxx|cpp|cu|h|hpp|hxx|py)$
20
+ - repo: local
21
+ hooks:
22
+ - id: clang-format
23
+ name: clang-format
24
+ description: Format files with ClangFormat
25
+ entry: bash .clang_format.hook -i
26
+ language: system
27
+ files: \.(c|cc|cxx|cpp|cu|h|hpp|hxx|cuh|proto)$
28
+ # For Python files
29
+ - repo: https://github.com/psf/black.git
30
+ rev: 24.10.0
31
+ hooks:
32
+ - id: black
33
+ files: (.*\.(py|pyi|bzl)|BUILD|.*\.BUILD|WORKSPACE)$
34
+
35
+ # Flake8
36
+ - repo: https://github.com/pycqa/flake8
37
+ rev: 7.1.1
38
+ hooks:
39
+ - id: flake8
40
+ args:
41
+ - --count
42
+ - --select=E9,F63,F7,F82,E721
43
+ - --show-source
44
+ - --statistics
45
+ exclude: ^benchmark/|^test_tipc/
.streamlit/config.toml ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ [server]
2
+ maxMessageSize = 500
.style.yapf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ [style]
2
+ based_on_style = pep8
3
+ column_limit = 80
CNAME ADDED
@@ -0,0 +1 @@
 
 
1
+ www.paddleocr.ai
Dockerfile ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ FROM python:3.10-slim
2
+
3
+ WORKDIR /app
4
+
5
+ RUN apt-get update && apt-get install -y \
6
+ build-essential \
7
+ curl \
8
+ software-properties-common \
9
+ git \
10
+ libgl1-mesa-glx \
11
+ libgomp1 \
12
+ && rm -rf /var/lib/apt/lists/*
13
+
14
+ COPY . .
15
+
16
+ RUN pip install --no-cache-dir -r requirements.txt
17
+
18
+ EXPOSE 8501
19
+
20
+ HEALTHCHECK CMD curl --fail http://localhost:8501/_stcore/health
21
+
22
+ ENTRYPOINT ["streamlit", "run", "vizan_studio_v2/app.py", "--server.port=8501", "--server.address=0.0.0.0"]
LICENSE ADDED
@@ -0,0 +1,201 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Apache License
2
+ Version 2.0, January 2004
3
+ http://www.apache.org/licenses/
4
+
5
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6
+
7
+ 1. Definitions.
8
+
9
+ "License" shall mean the terms and conditions for use, reproduction,
10
+ and distribution as defined by Sections 1 through 9 of this document.
11
+
12
+ "Licensor" shall mean the copyright owner or entity authorized by
13
+ the copyright owner that is granting the License.
14
+
15
+ "Legal Entity" shall mean the union of the acting entity and all
16
+ other entities that control, are controlled by, or are under common
17
+ control with that entity. For the purposes of this definition,
18
+ "control" means (i) the power, direct or indirect, to cause the
19
+ direction or management of such entity, whether by contract or
20
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
21
+ outstanding shares, or (iii) beneficial ownership of such entity.
22
+
23
+ "You" (or "Your") shall mean an individual or Legal Entity
24
+ exercising permissions granted by this License.
25
+
26
+ "Source" form shall mean the preferred form for making modifications,
27
+ including but not limited to software source code, documentation
28
+ source, and configuration files.
29
+
30
+ "Object" form shall mean any form resulting from mechanical
31
+ transformation or translation of a Source form, including but
32
+ not limited to compiled object code, generated documentation,
33
+ and conversions to other media types.
34
+
35
+ "Work" shall mean the work of authorship, whether in Source or
36
+ Object form, made available under the License, as indicated by a
37
+ copyright notice that is included in or attached to the work
38
+ (an example is provided in the Appendix below).
39
+
40
+ "Derivative Works" shall mean any work, whether in Source or Object
41
+ form, that is based on (or derived from) the Work and for which the
42
+ editorial revisions, annotations, elaborations, or other modifications
43
+ represent, as a whole, an original work of authorship. For the purposes
44
+ of this License, Derivative Works shall not include works that remain
45
+ separable from, or merely link (or bind by name) to the interfaces of,
46
+ the Work and Derivative Works thereof.
47
+
48
+ "Contribution" shall mean any work of authorship, including
49
+ the original version of the Work and any modifications or additions
50
+ to that Work or Derivative Works thereof, that is intentionally
51
+ submitted to Licensor for inclusion in the Work by the copyright owner
52
+ or by an individual or Legal Entity authorized to submit on behalf of
53
+ the copyright owner. For the purposes of this definition, "submitted"
54
+ means any form of electronic, verbal, or written communication sent
55
+ to the Licensor or its representatives, including but not limited to
56
+ communication on electronic mailing lists, source code control systems,
57
+ and issue tracking systems that are managed by, or on behalf of, the
58
+ Licensor for the purpose of discussing and improving the Work, but
59
+ excluding communication that is conspicuously marked or otherwise
60
+ designated in writing by the copyright owner as "Not a Contribution."
61
+
62
+ "Contributor" shall mean Licensor and any individual or Legal Entity
63
+ on behalf of whom a Contribution has been received by Licensor and
64
+ subsequently incorporated within the Work.
65
+
66
+ 2. Grant of Copyright License. Subject to the terms and conditions of
67
+ this License, each Contributor hereby grants to You a perpetual,
68
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69
+ copyright license to reproduce, prepare Derivative Works of,
70
+ publicly display, publicly perform, sublicense, and distribute the
71
+ Work and such Derivative Works in Source or Object form.
72
+
73
+ 3. Grant of Patent License. Subject to the terms and conditions of
74
+ this License, each Contributor hereby grants to You a perpetual,
75
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76
+ (except as stated in this section) patent license to make, have made,
77
+ use, offer to sell, sell, import, and otherwise transfer the Work,
78
+ where such license applies only to those patent claims licensable
79
+ by such Contributor that are necessarily infringed by their
80
+ Contribution(s) alone or by combination of their Contribution(s)
81
+ with the Work to which such Contribution(s) was submitted. If You
82
+ institute patent litigation against any entity (including a
83
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
84
+ or a Contribution incorporated within the Work constitutes direct
85
+ or contributory patent infringement, then any patent licenses
86
+ granted to You under this License for that Work shall terminate
87
+ as of the date such litigation is filed.
88
+
89
+ 4. Redistribution. You may reproduce and distribute copies of the
90
+ Work or Derivative Works thereof in any medium, with or without
91
+ modifications, and in Source or Object form, provided that You
92
+ meet the following conditions:
93
+
94
+ (a) You must give any other recipients of the Work or
95
+ Derivative Works a copy of this License; and
96
+
97
+ (b) You must cause any modified files to carry prominent notices
98
+ stating that You changed the files; and
99
+
100
+ (c) You must retain, in the Source form of any Derivative Works
101
+ that You distribute, all copyright, patent, trademark, and
102
+ attribution notices from the Source form of the Work,
103
+ excluding those notices that do not pertain to any part of
104
+ the Derivative Works; and
105
+
106
+ (d) If the Work includes a "NOTICE" text file as part of its
107
+ distribution, then any Derivative Works that You distribute must
108
+ include a readable copy of the attribution notices contained
109
+ within such NOTICE file, excluding those notices that do not
110
+ pertain to any part of the Derivative Works, in at least one
111
+ of the following places: within a NOTICE text file distributed
112
+ as part of the Derivative Works; within the Source form or
113
+ documentation, if provided along with the Derivative Works; or,
114
+ within a display generated by the Derivative Works, if and
115
+ wherever such third-party notices normally appear. The contents
116
+ of the NOTICE file are for informational purposes only and
117
+ do not modify the License. You may add Your own attribution
118
+ notices within Derivative Works that You distribute, alongside
119
+ or as an addendum to the NOTICE text from the Work, provided
120
+ that such additional attribution notices cannot be construed
121
+ as modifying the License.
122
+
123
+ You may add Your own copyright statement to Your modifications and
124
+ may provide additional or different license terms and conditions
125
+ for use, reproduction, or distribution of Your modifications, or
126
+ for any such Derivative Works as a whole, provided Your use,
127
+ reproduction, and distribution of the Work otherwise complies with
128
+ the conditions stated in this License.
129
+
130
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
131
+ any Contribution intentionally submitted for inclusion in the Work
132
+ by You to the Licensor shall be under the terms and conditions of
133
+ this License, without any additional terms or conditions.
134
+ Notwithstanding the above, nothing herein shall supersede or modify
135
+ the terms of any separate license agreement you may have executed
136
+ with Licensor regarding such Contributions.
137
+
138
+ 6. Trademarks. This License does not grant permission to use the trade
139
+ names, trademarks, service marks, or product names of the Licensor,
140
+ except as required for reasonable and customary use in describing the
141
+ origin of the Work and reproducing the content of the NOTICE file.
142
+
143
+ 7. Disclaimer of Warranty. Unless required by applicable law or
144
+ agreed to in writing, Licensor provides the Work (and each
145
+ Contributor provides its Contributions) on an "AS IS" BASIS,
146
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147
+ implied, including, without limitation, any warranties or conditions
148
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149
+ PARTICULAR PURPOSE. You are solely responsible for determining the
150
+ appropriateness of using or redistributing the Work and assume any
151
+ risks associated with Your exercise of permissions under this License.
152
+
153
+ 8. Limitation of Liability. In no event and under no legal theory,
154
+ whether in tort (including negligence), contract, or otherwise,
155
+ unless required by applicable law (such as deliberate and grossly
156
+ negligent acts) or agreed to in writing, shall any Contributor be
157
+ liable to You for damages, including any direct, indirect, special,
158
+ incidental, or consequential damages of any character arising as a
159
+ result of this License or out of the use or inability to use the
160
+ Work (including but not limited to damages for loss of goodwill,
161
+ work stoppage, computer failure or malfunction, or any and all
162
+ other commercial damages or losses), even if such Contributor
163
+ has been advised of the possibility of such damages.
164
+
165
+ 9. Accepting Warranty or Additional Liability. While redistributing
166
+ the Work or Derivative Works thereof, You may choose to offer,
167
+ and charge a fee for, acceptance of support, warranty, indemnity,
168
+ or other liability obligations and/or rights consistent with this
169
+ License. However, in accepting such obligations, You may act only
170
+ on Your own behalf and on Your sole responsibility, not on behalf
171
+ of any other Contributor, and only if You agree to indemnify,
172
+ defend, and hold each Contributor harmless for any liability
173
+ incurred by, or claims asserted against, such Contributor by reason
174
+ of your accepting any such warranty or additional liability.
175
+
176
+ END OF TERMS AND CONDITIONS
177
+
178
+ APPENDIX: How to apply the Apache License to your work.
179
+
180
+ To apply the Apache License to your work, attach the following
181
+ boilerplate notice, with the fields enclosed by brackets "[]"
182
+ replaced with your own identifying information. (Don't include
183
+ the brackets!) The text should be enclosed in the appropriate
184
+ comment syntax for the file format. We also recommend that a
185
+ file or class name and description of purpose be included on the
186
+ same "printed page" as the copyright notice for easier
187
+ identification within third-party archives.
188
+
189
+ Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
190
+
191
+ Licensed under the Apache License, Version 2.0 (the "License");
192
+ you may not use this file except in compliance with the License.
193
+ You may obtain a copy of the License at
194
+
195
+ http://www.apache.org/licenses/LICENSE-2.0
196
+
197
+ Unless required by applicable law or agreed to in writing, software
198
+ distributed under the License is distributed on an "AS IS" BASIS,
199
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200
+ See the License for the specific language governing permissions and
201
+ limitations under the License.
MANIFEST.in ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ prune .github
2
+ prune applications
3
+ prune benchmark
4
+ prune configs
5
+ prune deploy
6
+ prune doc
7
+ prune docs
8
+ prune overrides
9
+ prune ppocr/ext_op
10
+ prune ppocr/losses
11
+ prune ppocr/metrics
12
+ prune ppocr/modeling
13
+ prune ppocr/optimizer
14
+ prune ppstructure/docs
15
+ prune test_tipc
16
+ prune tests
17
+ exclude .clang_format.hook
18
+ exclude .gitignore
19
+ exclude .pre-commit-config.yaml
20
+ exclude .style.yapf
21
+ exclude mkdocs.yml
22
+ exclude train.sh
README.md ADDED
@@ -0,0 +1,546 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <div align="center">
2
+ <p>
3
+ <img width="100%" src="./docs/images/Banner.png" alt="PaddleOCR Banner">
4
+ </p>
5
+
6
+ English | [简体中文](./readme/README_cn.md) | [繁體中文](./readme/README_tcn.md) | [日本語](./readme/README_ja.md) | [한국어](./readme/README_ko.md) | [Français](./readme/README_fr.md) | [Русский](./readme/README_ru.md) | [Español](./readme/README_es.md) | [العربية](./readme/README_ar.md)
7
+
8
+ <!-- icon -->
9
+ [![stars](https://img.shields.io/github/stars/PaddlePaddle/PaddleOCR?color=ccf)](https://github.com/PaddlePaddle/PaddleOCR)
10
+ [![forks](https://img.shields.io/github/forks/PaddlePaddle/PaddleOCR.svg)](https://github.com/PaddlePaddle/PaddleOCR)
11
+ [![arXiv](https://img.shields.io/badge/PaddleOCR_3.0-Technical%20Report-b31b1b.svg?logo=arXiv)](https://arxiv.org/pdf/2507.05595)
12
+ [![arXiv](https://img.shields.io/badge/PaddleOCR--VL-Technical%20Report-b31b1b.svg?logo=arXiv)](https://arxiv.org/abs/2510.14528)
13
+
14
+ [![PyPI Downloads](https://static.pepy.tech/badge/paddleocr/month)](https://pepy.tech/projects/paddleocr)
15
+ [![PyPI Downloads](https://static.pepy.tech/badge/paddleocr)](https://pepy.tech/projects/paddleocr)
16
+ [![Used by](https://img.shields.io/badge/Used%20by-6k%2B%20repositories-blue)](https://github.com/PaddlePaddle/PaddleOCR/network/dependents)
17
+ [![PyPI version](https://img.shields.io/pypi/v/paddleocr)](https://pypi.org/project/paddleocr/)
18
+ ![python](https://img.shields.io/badge/python-3.8~3.12-aff.svg)
19
+
20
+ ![os](https://img.shields.io/badge/os-linux%2C%20win%2C%20mac-pink.svg)
21
+ ![hardware](https://img.shields.io/badge/hardware-cpu%2C%20gpu%2C%20xpu%2C%20npu-yellow.svg)
22
+ [![License](https://img.shields.io/badge/license-Apache_2.0-green)](../LICENSE)
23
+ [![Ask DeepWiki](https://deepwiki.com/badge.svg)](https://deepwiki.com/PaddlePaddle/PaddleOCR)
24
+ [![AI Studio](https://img.shields.io/badge/PaddleOCR-_Offiical_Website-1927BA?logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAgAAAAIACAMAAADDpiTIAAAABlBMVEU2P+X///+1KuUwAAAHKklEQVR42u3dS5bjOAwEwALvf2fMavZum6IAImI7b2yYSqU+1Zb//gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADKCR/+fzly7rD92yVg69xh8zeLwOa5w+ZvFYHtc4ft3ykB++cOm79PAp6YO2z/Ngl4ZO5l+9+yT4QAvLqS748VF33Ylzdvzpl72f6z53YIGJ6SZdPeNHcIwOycaADdLgCSIgAIgCOAACAAykIAEAAEAAFAABCAT+WQuQVgeBqXhXQIQAAYegowLQBpbg3gZGFyAC6vgBQAMREA2/YfDPxyaDQNyTNz+3Zwn5J4ZG7PB2h0kHhi7plPCImmJwkPzO0RMa3OET0i5uGlzHFze0xcu0vE2Dq3J4U2vEPgSaHbFzPNDQAAAAAAAMBNovdw+cP/ny+uaf7w/+eYADy8kE+F4Offdjn6zZXhAXgiA78G4MNNsmnu1Xr7b3mbOL8T5Ja5bw/A35EC2LiWpzt1y9jRugBy30fLg3NvHPvnuZcC2NsCUXA/aRmA89V07Fwgt37uH8deCmBr6N44pP4UgaUATpdA7v/cMbIB8okliY65/SW5HhJ1ehPmM+8edwXgpbu4R88FayR32Y/P7oZZbOx13/Zr//ZHx27bAPnkFoyewYlbAhD3TvBobr95gaUAtr1EdNx1lgI4OcTTuR3z6+FZMEDRcu9ZCuDgGCdyGxMa4EgBRMvcjrkM7NgBZw5c0TwAUWUhZwRXA2xaya65Xa3jO2qYZ8bu2AD5w38tG5V8aZpoGN6Tz0bOfa9bceyWAciTO0jWyO1Tc5cLwJmF/JfPnXVyu3/slgHIg1n79O2O5fZv+1cHV7sC2HYqmUdHysNzX3sVkMcjUK5Gc+dMs28E5bGtm0V3gloBOP9vgZv+4sYn3RUaYFMCol5uN77g6lUApc8pWs69Zn7snS9Z9Q8G0S0AUTVUUTG3A54R1KSvo/diLAv5fKzynZeN6xogC75u93+AtBTA47OlAFSv6qY/vp3DAjD8iv2ZdFYJwKynMhTK1rInPfzaxW81LnvSgFP9KxrATaCLA3DxHpbFX31ZyNm5XRZyXG5bNkAWfP0rcrsUwOgC6NIAzgBcBiqAWwPgLrAGuGBP6jr2sifdfiJ6QQM4Bbw4AK4B3129ZSFn53ZZyA/GyFty27IBFMDFAXAG8PbyLQv5xULGPRl0K3h2AbwcgCZPhs+LD1zLnjS6AN4NwMU/DVFh7LyhASreTbvqrxdr/J4XT4Swz4FrTS+AGJ7bNbwAYkxuWzZAVljHrJfbjb9wviYXwFO/FJ8Vli4vaICsEMFyBbA3tmtsAUS0zG1c/bj4YwsZH2/+Whd0+1Nb+S7IE2sfPw4RL0XmsR8Nqvz7qFngmPHF34EqjP15AAofAkosZKPC/K6FVoeP02Ehi540NG6AK/4pYP3cLgVwXwHkDQ1QcSGb/uF4WwCmfX8u/+4vgLINcMUlQIfcLgXwXAF0+BGkpQDuuJx7/hwgpu//cWVuO3wxJOz/z8297vgYBwaIO3O7Kn+c194578ltywbIgu8fl+Z2lS+APvnLjnOv8hsgSqxjgwL4Ln9LAezaj98tgPzy7ZcC+GQzxrWxXQpgx370dm6/H7v6jaBoso5dY1swAFlwHWvfBf5pxVa93fCtdx64+1dsgCy4joWvAfPX9VoKYMs6Zse9/8Mlvv7LILlhAfKFFdsSutJXAdFkL3qlADJPrXFcXAC5KYaH586jO9mtAch9S3T0GQJ726ZWAE49kjP3rlDJuetda
L/1zeqZY9c7CRz7s0wCUPxienQBnAuAAtAAlxaAAAxfyBQABSAACkAAFIAAKAABUAACMEkKwL170oh7V8ueNLoAjgTAXWAN4BRwcABcA2oABTA4AApAAyiAwQFQABpAAQwOgALQADMWUgCuEmNyu15fSIY3gFPAiwPgFFADKIDBAVAAGkABCIACmBqAUAAaQAHMDUCMWkgBuMWw3K43F5LhDeAU8OIAuAmkARTA4AAoAA2gAARAAUwNgLvAGkABDA6Au8AaoKOJuV0vLSTDG8Ap4MUBcBNIAyiAwQFQABpAAQwOgALQAApAABTA1AC4C6wBOhqb23V+IRneAE4BLw6Aa0ANoAAGB0ABaAAFMDgACkADKAABUABTA+AusAboKATAQs4trjV+IYcfuJYCcA6gAATAQk69dFkKQANYyLkFcLIBFIDLQAVwawDsSRrAEWBwAJwCagAFMDgACkADKIDBAVAAGkABCIACmBoAzwXWAApgcADsSRrg0iNACoACEADXgAIwdCFTACykALgGFIAfl0kBAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAPBv/gN+IH8U6YveYgAAAABJRU5ErkJggg==&labelColor=white)](https://www.paddleocr.com)
25
+
26
+
27
+
28
+ **PaddleOCR is an industry-leading, production-ready OCR and document AI engine, offering end-to-end solutions from text extraction to intelligent document understanding**
29
+
30
+ </div>
31
+
32
+ # PaddleOCR
33
+ [![Framework](https://img.shields.io/badge/PaddlePaddle-3.0-orange)](https://www.paddlepaddle.org.cn/en)
34
+ [![Accuracy](https://img.shields.io/badge/Recognition%20Accuracy-🏆-green)](#)
35
+ [![Multi-Language](https://img.shields.io/badge/Support_Languages-100+-brightgreen)](#)
36
+ [![Handwriting](https://img.shields.io/badge/Handwriting-✓-success)](#)
37
+ [![Hardware](https://img.shields.io/badge/Heterogeneous%20Hardware-Kunlunxin%20%7C%20Ascend_NPU-red)](#)
38
+
39
+ > [!TIP]
40
+ > PaddleOCR now provides an MCP server that supports integration with Agent applications like Claude Desktop. For details, please refer to [PaddleOCR MCP Server](https://paddlepaddle.github.io/PaddleOCR/latest/en/version3.x/deployment/mcp_server.html).
41
+ >
42
+ > The PaddleOCR 3.0 Technical Report is now available. See details at: [PaddleOCR 3.0 Technical Report](https://arxiv.org/abs/2507.05595).
43
+ >
44
+ > The PaddleOCR-VL Technical Report is now available. See details at [PaddleOCR-VL Technical Report](https://arxiv.org/abs/2510.14528).
45
+ >
46
+ > The Beta version of the PaddleOCR official website is now live, offering a more convenient online experience and large-scale PDF file parsing, as well as free API and MCP services. For more details, please visit the [PaddleOCR official website](https://www.paddleocr.com).
47
+
48
+
49
+ **PaddleOCR** converts documents and images into **structured, AI-friendly data** (like JSON and Markdown) with **industry-leading accuracy**—powering AI applications for everyone from indie developers and startups to large enterprises worldwide. With over **60,000 stars** and deep integration into leading projects like **MinerU, RAGFlow, pathway and cherry-studio**, PaddleOCR has become the **premier solution** for developers building intelligent document applications in the **AI era**.
50
+
51
+ ### PaddleOCR 3.0 Core Features
52
+
53
+ [![HuggingFace](https://img.shields.io/badge/PaddleOCR--VL-_Demo_on_HuggingFace-yellow?logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAF8AAABYCAMAAACkl9t/AAAAk1BMVEVHcEz/nQv/nQv/nQr/nQv/nQr/nQv/nQv/nQr/wRf/txT/pg7/yRr/rBD/zRz/ngv/oAz/zhz/nwv/txT/ngv/0B3+zBz/nQv/0h7/wxn/vRb/thXkuiT/rxH/pxD/ogzcqyf/nQvTlSz/czCxky7/SjifdjT/Mj3+Mj3wMj15aTnDNz+DSD9RTUBsP0FRO0Q6O0WyIxEIAAAAGHRSTlMADB8zSWF3krDDw8TJ1NbX5efv8ff9/fxKDJ9uAAAGKklEQVR42u2Z63qjOAyGC4RwCOfB2JAGqrSb2WnTw/1f3UaWcSGYNKTdf/P+mOkTrE+yJBulvfvLT2A5ruenaVHyIks33npl/6C4s/ZLAM45SOi/1FtZPyFur1OYofBX3w7d54Bxm+E8db+nDr12ttmESZ4zludJEG5S7TO72YPlKZFyE+YCYUJTBZsMiNS5Sd7NlDmKM2Eg2JQg8awbglfqgbhArjxkS7dgp2RH6hc9AMLdZYUtZN5DJr4molC8BfKrEkPKEnEVjLbgW1fLy77ZVOJagoIcLIl+IxaQZGjiX597HopF5CkaXVMDO9Pyix3AFV3kw4lQLCbHuMovz8FallbcQIJ5Ta0vks9RnolbCK84BtjKRS5uA43hYoZcOBGIG2Epbv6CvFVQ8m8loh66WNySsnN7htL58LNp+NXT8/PhXiBXPMjLSxtwp8W9f/1AngRierBkA+kk/IpUSOeKByzn8y3kAAAfh//0oXgV4roHm/kz4E2z//zRc3/lgwBzbM2mJxQEa5pqgX7d1L0htrhx7LKxOZlKbwcAWyEOWqYSI8YPtgDQVjpB5nvaHaSnBaQSD6hweDi8PosxD6/PT09YY3xQA7LTCTKfYX+QHpA0GCcqmEHvr/cyfKQTEuwgbs2kPxJEB0iNjfJcCTPyocx+A0griHSmADiC91oNGVwJ69RudYe65vJmoqfpul0lrqXadW0jFKH5BKwAeCq+Den7s+3zfRJzA61/Uj/9H/VzLKTx9jFPPdXeeP+L7WEvDLAKAIoF8bPTKT0+TM7W8ePj3Rz/Yn3kOAp2f1Kf0Weony7pn/cPydvhQYV+eFOfmOu7VB/ViPe34/EN3RFHY/yRuT8ddCtMPH/McBAT5s+vRde/gf2c/sPsjLK+m5IBQF5tO+h2tTlBGnP6693JdsvofjOPnnEHkh2TnV/X1fBl9S5zrwuwF8NFrAVJVwCAPTe8gaJlomqlp0pv4Pjn98tJ/t/fL++6unpR1YGC2n/KCoa0tTLoKiEeUPDl94nj+5/Tv3/eT5vBQ60X1S0oZr+IWRR8Ldhu7AlLjPISlJcO9vrFotky9SpzDequlwEir5beYAc0R7D9KS1DXva0jhYRDXoExPdc6yw5GShkZXe9QdO/uOvHofxjrV/TNS6iMJS+4TcSTgk9n5agJdBQbB//IfF/HpvPt3Tbi7b6I6K0R72p6ajryEJrENW2bbeVUGjfgoals4L443c7BEE4mJO2SpbRngxQrAKRudRzGQ8jVOL2qDVjjI8K1gc3TIJ5KiFZ1q+gdsARPB4NQS4AjwVSt72DSoXNyOWUrU5mQ9nRYyjp89Xo7oRI6Bga9QNT1mQ/ptaJq5T/7WcgAZywR/XlPGAUDdet3LE+qS0TI+g+aJU8MIqjo0Kx8Ly+maxLjJmjQ18rA0YCkxLQbUZP1WqdmyQGJLUm7VnQFqodmXSqmRrdVpqdzk5LvmvgtEcW8PMGdaS23EOWyDVbACZzUJPaqMbjDxpA3Qrgl0AikimGDbqmyT8P8NOYiqrldF8rX+YN7TopX4UoHuSCYY7cgX4gHwclQKl1zhx0
THf+tCAUValzjI7Wg9EhptrkIcfIJjA94evOn8B2eHaVzvBrnl2ig0So6hvPaz0IGcOvTHvUIlE2+prqAxLSQxZlU2stql1NqCCLdIiIN/i1DBEHUoElM9dBravbiAnKqgpi4IBkw+utSPIoBijDXJipSVV7MpOEJUAc5Qmm3BnUN+w3hteEieYKfRZSIUcXKMVf0u5wD4EwsUNVvZOtUT7A2GkffHjByWpHqvRBYrTV72a6j8zZ6W0DTE86Hn04bmyWX3Ri9WH7ZU6Q7h+ZHo0nHUAcsQvVhXRDZHChwiyi/hnPuOsSEF6Exk3o6Y9DT1eZ+6cASXk2Y9k+6EOQMDGm6WBK10wOQJCBwren86cPPWUcRAnTVjGcU1LBgs9FURiX/e6479yZcLwCBmTxiawEwrOcleuu12t3tbLv/N4RLYIBhYexm7Fcn4OJcn0+zc+s8/VfPeddZHAGN6TT8eGczHdR/Gts1/MzDkThr23zqrVfAMFT33Nx1RJsx1k5zuWILLnG/vsH+Fv5D4NTVcp1Gzo8AAAAAElFTkSuQmCC&labelColor=white)](https://huggingface.co/spaces/PaddlePaddle/PaddleOCR-VL_Online_Demo)
54
+ [![AI Studio](https://img.shields.io/badge/PaddleOCR--VL-_Demo_on_AI_Studio-1927BA?logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAgAAAAIACAMAAADDpiTIAAAABlBMVEU2P+X///+1KuUwAAAHKklEQVR42u3dS5bjOAwEwALvf2fMavZum6IAImI7b2yYSqU+1Zb//gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADKCR/+fzly7rD92yVg69xh8zeLwOa5w+ZvFYHtc4ft3ykB++cOm79PAp6YO2z/Ngl4ZO5l+9+yT4QAvLqS748VF33Ylzdvzpl72f6z53YIGJ6SZdPeNHcIwOycaADdLgCSIgAIgCOAACAAykIAEAAEAAFAABCAT+WQuQVgeBqXhXQIQAAYegowLQBpbg3gZGFyAC6vgBQAMREA2/YfDPxyaDQNyTNz+3Zwn5J4ZG7PB2h0kHhi7plPCImmJwkPzO0RMa3OET0i5uGlzHFze0xcu0vE2Dq3J4U2vEPgSaHbFzPNDQAAAAAAAMBNovdw+cP/ny+uaf7w/+eYADy8kE+F4Offdjn6zZXhAXgiA78G4MNNsmnu1Xr7b3mbOL8T5Ja5bw/A35EC2LiWpzt1y9jRugBy30fLg3NvHPvnuZcC2NsCUXA/aRmA89V07Fwgt37uH8deCmBr6N44pP4UgaUATpdA7v/cMbIB8okliY65/SW5HhJ1ehPmM+8edwXgpbu4R88FayR32Y/P7oZZbOx13/Zr//ZHx27bAPnkFoyewYlbAhD3TvBobr95gaUAtr1EdNx1lgI4OcTTuR3z6+FZMEDRcu9ZCuDgGCdyGxMa4EgBRMvcjrkM7NgBZw5c0TwAUWUhZwRXA2xaya65Xa3jO2qYZ8bu2AD5w38tG5V8aZpoGN6Tz0bOfa9bceyWAciTO0jWyO1Tc5cLwJmF/JfPnXVyu3/slgHIg1n79O2O5fZv+1cHV7sC2HYqmUdHysNzX3sVkMcjUK5Gc+dMs28E5bGtm0V3gloBOP9vgZv+4sYn3RUaYFMCol5uN77g6lUApc8pWs69Zn7snS9Z9Q8G0S0AUTVUUTG3A54R1KSvo/diLAv5fKzynZeN6xogC75u93+AtBTA47OlAFSv6qY/vp3DAjD8iv2ZdFYJwKynMhTK1rInPfzaxW81LnvSgFP9KxrATaCLA3DxHpbFX31ZyNm5XRZyXG5bNkAWfP0rcrsUwOgC6NIAzgBcBiqAWwPgLrAGuGBP6jr2sifdfiJ6QQM4Bbw4AK4B3129ZSFn53ZZyA/GyFty27IBFMDFAXAG8PbyLQv5xULGPRl0K3h2AbwcgCZPhs+LD1zLnjS6AN4NwMU/DVFh7LyhASreTbvqrxdr/J4XT4Swz4FrTS+AGJ7bNbwAYkxuWzZAVljHrJfbjb9wviYXwFO/FJ8Vli4vaICsEMFyBbA3tmtsAUS0zG1c/bj4YwsZH2/+Whd0+1Nb+S7IE2sfPw4RL0XmsR8Nqvz7qFngmPHF34EqjP15AAofAkosZKPC/K6FVoeP02Ehi540NG6AK/4pYP3cLgVwXwHkDQ1QcSGb/uF4WwCmfX8u/+4vgLINcMUlQIfcLgXwXAF0+BGkpQDuuJx7/hwgpu//cWVuO3wxJOz/z8297vgYBwaIO3O7Kn+c194578ltywbIgu8fl+Z2lS+APvnLjnOv8hsgSqxjgwL4Ln9LAezaj98tgPzy7ZcC+GQzxrWxXQpgx370dm6/H7v6jaBoso5dY1swAFlwHWvfBf5pxVa93fCtdx64+1dsgCy4joWvAfPX9VoKYMs6Zse9/8Mlvv7LILlhAfKFFdsSutJXAdFkL3qlADJPrXFcXAC5KYaH586jO9mtAch9S3T0GQJ726ZWAE49kjP3rlDJ
uetdaL/1zeqZY9c7CRz7s0wCUPxienQBnAuAAtAAlxaAAAxfyBQABSAACkAAFIAAKAABUAACMEkKwL170oh7V8ueNLoAjgTAXWAN4BRwcABcA2oABTA4AApAAyiAwQFQABpAAQwOgALQADMWUgCuEmNyu15fSIY3gFPAiwPgFFADKIDBAVAAGkABCIACmBqAUAAaQAHMDUCMWkgBuMWw3K43F5LhDeAU8OIAuAmkARTA4AAoAA2gAARAAUwNgLvAGkABDA6Au8AaoKOJuV0vLSTDG8Ap4MUBcBNIAyiAwQFQABpAAQwOgALQAApAABTA1AC4C6wBOhqb23V+IRneAE4BLw6Aa0ANoAAGB0ABaAAFMDgACkADKAABUABTA+AusAboKATAQs4trjV+IYcfuJYCcA6gAATAQk69dFkKQANYyLkFcLIBFIDLQAVwawDsSRrAEWBwAJwCagAFMDgACkADKIDBAVAAGkABCIACmBoAzwXWAApgcADsSRrg0iNACoACEADXgAIwdCFTACykALgGFIAfl0kBAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAPBv/gN+IH8U6YveYgAAAABJRU5ErkJggg==&labelColor=white)](https://aistudio.baidu.com/application/detail/98365)
55
+ [![ModelScope](https://img.shields.io/badge/PaddleOCR--VL-_Demo_on_ModelScope-purple?logo=data:image/svg+xml;base64,PHN2ZyB3aWR0aD0iMjIzIiBoZWlnaHQ9IjIwMCIgeG1sbnM9Imh0dHA6Ly93d3cudzMub3JnLzIwMDAvc3ZnIj4KCiA8Zz4KICA8dGl0bGU+TGF5ZXIgMTwvdGl0bGU+CiAgPHBhdGggaWQ9InN2Z18xNCIgZmlsbD0iIzYyNGFmZiIgZD0ibTAsODkuODRsMjUuNjUsMGwwLDI1LjY0OTk5bC0yNS42NSwwbDAsLTI1LjY0OTk5eiIvPgogIDxwYXRoIGlkPSJzdmdfMTUiIGZpbGw9IiM2MjRhZmYiIGQ9Im05OS4xNCwxMTUuNDlsMjUuNjUsMGwwLDI1LjY1bC0yNS42NSwwbDAsLTI1LjY1eiIvPgogIDxwYXRoIGlkPSJzdmdfMTYiIGZpbGw9IiM2MjRhZmYiIGQ9Im0xNzYuMDksMTQxLjE0bC0yNS42NDk5OSwwbDAsMjIuMTlsNDcuODQsMGwwLC00Ny44NGwtMjIuMTksMGwwLDI1LjY1eiIvPgogIDxwYXRoIGlkPSJzdmdfMTciIGZpbGw9IiMzNmNmZDEiIGQ9Im0xMjQuNzksODkuODRsMjUuNjUsMGwwLDI1LjY0OTk5bC0yNS42NSwwbDAsLTI1LjY0OTk5eiIvPgogIDxwYXRoIGlkPSJzdmdfMTgiIGZpbGw9IiMzNmNmZDEiIGQ9Im0wLDY0LjE5bDI1LjY1LDBsMCwyNS42NWwtMjUuNjUsMGwwLC0yNS42NXoiLz4KICA8cGF0aCBpZD0ic3ZnXzE5IiBmaWxsPSIjNjI0YWZmIiBkPSJtMTk4LjI4LDg5Ljg0bDI1LjY0OTk5LDBsMCwyNS42NDk5OWwtMjUuNjQ5OTksMGwwLC0yNS42NDk5OXoiLz4KICA8cGF0aCBpZD0ic3ZnXzIwIiBmaWxsPSIjMzZjZmQxIiBkPSJtMTk4LjI4LDY0LjE5bDI1LjY0OTk5LDBsMCwyNS42NWwtMjUuNjQ5OTksMGwwLC0yNS42NXoiLz4KICA8cGF0aCBpZD0ic3ZnXzIxIiBmaWxsPSIjNjI0YWZmIiBkPSJtMTUwLjQ0LDQybDAsMjIuMTlsMjUuNjQ5OTksMGwwLDI1LjY1bDIyLjE5LDBsMCwtNDcuODRsLTQ3Ljg0LDB6Ii8+CiAgPHBhdGggaWQ9InN2Z18yMiIgZmlsbD0iIzM2Y2ZkMSIgZD0ibTczLjQ5LDg5Ljg0bDI1LjY1LDBsMCwyNS42NDk5OWwtMjUuNjUsMGwwLC0yNS42NDk5OXoiLz4KICA8cGF0aCBpZD0ic3ZnXzIzIiBmaWxsPSIjNjI0YWZmIiBkPSJtNDcuODQsNjQuMTlsMjUuNjUsMGwwLC0yMi4xOWwtNDcuODQsMGwwLDQ3Ljg0bDIyLjE5LDBsMCwtMjUuNjV6Ii8+CiAgPHBhdGggaWQ9InN2Z18yNCIgZmlsbD0iIzYyNGFmZiIgZD0ibTQ3Ljg0LDExNS40OWwtMjIuMTksMGwwLDQ3Ljg0bDQ3Ljg0LDBsMCwtMjIuMTlsLTI1LjY1LDBsMCwtMjUuNjV6Ii8+CiA8L2c+Cjwvc3ZnPg==&labelColor=white)](https://www.modelscope.cn/studios/PaddlePaddle/PaddleOCR-VL_Online_Demo)
56
+
57
+ [![AI Studio](https://img.shields.io/badge/PP--OCRv5-Demo_on_AI_Studio-1927BA?logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAgAAAAIACAMAAADDpiTIAAAABlBMVEU2P+X///+1KuUwAAAHKklEQVR42u3dS5bjOAwEwALvf2fMavZum6IAImI7b2yYSqU+1Zb//gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADKCR/+fzly7rD92yVg69xh8zeLwOa5w+ZvFYHtc4ft3ykB++cOm79PAp6YO2z/Ngl4ZO5l+9+yT4QAvLqS748VF33Ylzdvzpl72f6z53YIGJ6SZdPeNHcIwOycaADdLgCSIgAIgCOAACAAykIAEAAEAAFAABCAT+WQuQVgeBqXhXQIQAAYegowLQBpbg3gZGFyAC6vgBQAMREA2/YfDPxyaDQNyTNz+3Zwn5J4ZG7PB2h0kHhi7plPCImmJwkPzO0RMa3OET0i5uGlzHFze0xcu0vE2Dq3J4U2vEPgSaHbFzPNDQAAAAAAAMBNovdw+cP/ny+uaf7w/+eYADy8kE+F4Offdjn6zZXhAXgiA78G4MNNsmnu1Xr7b3mbOL8T5Ja5bw/A35EC2LiWpzt1y9jRugBy30fLg3NvHPvnuZcC2NsCUXA/aRmA89V07Fwgt37uH8deCmBr6N44pP4UgaUATpdA7v/cMbIB8okliY65/SW5HhJ1ehPmM+8edwXgpbu4R88FayR32Y/P7oZZbOx13/Zr//ZHx27bAPnkFoyewYlbAhD3TvBobr95gaUAtr1EdNx1lgI4OcTTuR3z6+FZMEDRcu9ZCuDgGCdyGxMa4EgBRMvcjrkM7NgBZw5c0TwAUWUhZwRXA2xaya65Xa3jO2qYZ8bu2AD5w38tG5V8aZpoGN6Tz0bOfa9bceyWAciTO0jWyO1Tc5cLwJmF/JfPnXVyu3/slgHIg1n79O2O5fZv+1cHV7sC2HYqmUdHysNzX3sVkMcjUK5Gc+dMs28E5bGtm0V3gloBOP9vgZv+4sYn3RUaYFMCol5uN77g6lUApc8pWs69Zn7snS9Z9Q8G0S0AUTVUUTG3A54R1KSvo/diLAv5fKzynZeN6xogC75u93+AtBTA47OlAFSv6qY/vp3DAjD8iv2ZdFYJwKynMhTK1rInPfzaxW81LnvSgFP9KxrATaCLA3DxHpbFX31ZyNm5XRZyXG5bNkAWfP0rcrsUwOgC6NIAzgBcBiqAWwPgLrAGuGBP6jr2sifdfiJ6QQM4Bbw4AK4B3129ZSFn53ZZyA/GyFty27IBFMDFAXAG8PbyLQv5xULGPRl0K3h2AbwcgCZPhs+LD1zLnjS6AN4NwMU/DVFh7LyhASreTbvqrxdr/J4XT4Swz4FrTS+AGJ7bNbwAYkxuWzZAVljHrJfbjb9wviYXwFO/FJ8Vli4vaICsEMFyBbA3tmtsAUS0zG1c/bj4YwsZH2/+Whd0+1Nb+S7IE2sfPw4RL0XmsR8Nqvz7qFngmPHF34EqjP15AAofAkosZKPC/K6FVoeP02Ehi540NG6AK/4pYP3cLgVwXwHkDQ1QcSGb/uF4WwCmfX8u/+4vgLINcMUlQIfcLgXwXAF0+BGkpQDuuJx7/hwgpu//cWVuO3wxJOz/z8297vgYBwaIO3O7Kn+c194578ltywbIgu8fl+Z2lS+APvnLjnOv8hsgSqxjgwL4Ln9LAezaj98tgPzy7ZcC+GQzxrWxXQpgx370dm6/H7v6jaBoso5dY1swAFlwHWvfBf5pxVa93fCtdx64+1dsgCy4joWvAfPX9VoKYMs6Zse9/8Mlvv7LILlhAfKFFdsSutJXAdFkL3qlADJPrXFcXAC5KYaH586jO9mtAch9S3T0GQJ726ZWAE49kjP3rlDJuetda
L/1zeqZY9c7CRz7s0wCUPxienQBnAuAAtAAlxaAAAxfyBQABSAACkAAFIAAKAABUAACMEkKwL170oh7V8ueNLoAjgTAXWAN4BRwcABcA2oABTA4AApAAyiAwQFQABpAAQwOgALQADMWUgCuEmNyu15fSIY3gFPAiwPgFFADKIDBAVAAGkABCIACmBqAUAAaQAHMDUCMWkgBuMWw3K43F5LhDeAU8OIAuAmkARTA4AAoAA2gAARAAUwNgLvAGkABDA6Au8AaoKOJuV0vLSTDG8Ap4MUBcBNIAyiAwQFQABpAAQwOgALQAApAABTA1AC4C6wBOhqb23V+IRneAE4BLw6Aa0ANoAAGB0ABaAAFMDgACkADKAABUABTA+AusAboKATAQs4trjV+IYcfuJYCcA6gAATAQk69dFkKQANYyLkFcLIBFIDLQAVwawDsSRrAEWBwAJwCagAFMDgACkADKIDBAVAAGkABCIACmBoAzwXWAApgcADsSRrg0iNACoACEADXgAIwdCFTACykALgGFIAfl0kBAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAPBv/gN+IH8U6YveYgAAAABJRU5ErkJggg==&labelColor=white)](https://aistudio.baidu.com/community/app/91660/webUI)
58
+ [![AI Studio](https://img.shields.io/badge/PP--StructureV3-Demo_on_AI_Studio-1927BA?logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAgAAAAIACAMAAADDpiTIAAAABlBMVEU2P+X///+1KuUwAAAHKklEQVR42u3dS5bjOAwEwALvf2fMavZum6IAImI7b2yYSqU+1Zb//gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADKCR/+fzly7rD92yVg69xh8zeLwOa5w+ZvFYHtc4ft3ykB++cOm79PAp6YO2z/Ngl4ZO5l+9+yT4QAvLqS748VF33Ylzdvzpl72f6z53YIGJ6SZdPeNHcIwOycaADdLgCSIgAIgCOAACAAykIAEAAEAAFAABCAT+WQuQVgeBqXhXQIQAAYegowLQBpbg3gZGFyAC6vgBQAMREA2/YfDPxyaDQNyTNz+3Zwn5J4ZG7PB2h0kHhi7plPCImmJwkPzO0RMa3OET0i5uGlzHFze0xcu0vE2Dq3J4U2vEPgSaHbFzPNDQAAAAAAAMBNovdw+cP/ny+uaf7w/+eYADy8kE+F4Offdjn6zZXhAXgiA78G4MNNsmnu1Xr7b3mbOL8T5Ja5bw/A35EC2LiWpzt1y9jRugBy30fLg3NvHPvnuZcC2NsCUXA/aRmA89V07Fwgt37uH8deCmBr6N44pP4UgaUATpdA7v/cMbIB8okliY65/SW5HhJ1ehPmM+8edwXgpbu4R88FayR32Y/P7oZZbOx13/Zr//ZHx27bAPnkFoyewYlbAhD3TvBobr95gaUAtr1EdNx1lgI4OcTTuR3z6+FZMEDRcu9ZCuDgGCdyGxMa4EgBRMvcjrkM7NgBZw5c0TwAUWUhZwRXA2xaya65Xa3jO2qYZ8bu2AD5w38tG5V8aZpoGN6Tz0bOfa9bceyWAciTO0jWyO1Tc5cLwJmF/JfPnXVyu3/slgHIg1n79O2O5fZv+1cHV7sC2HYqmUdHysNzX3sVkMcjUK5Gc+dMs28E5bGtm0V3gloBOP9vgZv+4sYn3RUaYFMCol5uN77g6lUApc8pWs69Zn7snS9Z9Q8G0S0AUTVUUTG3A54R1KSvo/diLAv5fKzynZeN6xogC75u93+AtBTA47OlAFSv6qY/vp3DAjD8iv2ZdFYJwKynMhTK1rInPfzaxW81LnvSgFP9KxrATaCLA3DxHpbFX31ZyNm5XRZyXG5bNkAWfP0rcrsUwOgC6NIAzgBcBiqAWwPgLrAGuGBP6jr2sifdfiJ6QQM4Bbw4AK4B3129ZSFn53ZZyA/GyFty27IBFMDFAXAG8PbyLQv5xULGPRl0K3h2AbwcgCZPhs+LD1zLnjS6AN4NwMU/DVFh7LyhASreTbvqrxdr/J4XT4Swz4FrTS+AGJ7bNbwAYkxuWzZAVljHrJfbjb9wviYXwFO/FJ8Vli4vaICsEMFyBbA3tmtsAUS0zG1c/bj4YwsZH2/+Whd0+1Nb+S7IE2sfPw4RL0XmsR8Nqvz7qFngmPHF34EqjP15AAofAkosZKPC/K6FVoeP02Ehi540NG6AK/4pYP3cLgVwXwHkDQ1QcSGb/uF4WwCmfX8u/+4vgLINcMUlQIfcLgXwXAF0+BGkpQDuuJx7/hwgpu//cWVuO3wxJOz/z8297vgYBwaIO3O7Kn+c194578ltywbIgu8fl+Z2lS+APvnLjnOv8hsgSqxjgwL4Ln9LAezaj98tgPzy7ZcC+GQzxrWxXQpgx370dm6/H7v6jaBoso5dY1swAFlwHWvfBf5pxVa93fCtdx64+1dsgCy4joWvAfPX9VoKYMs6Zse9/8Mlvv7LILlhAfKFFdsSutJXAdFkL3qlADJPrXFcXAC5KYaH586jO9mtAch9S3T0GQJ726ZWAE49kjP3rlD
JuetdaL/1zeqZY9c7CRz7s0wCUPxienQBnAuAAtAAlxaAAAxfyBQABSAACkAAFIAAKAABUAACMEkKwL170oh7V8ueNLoAjgTAXWAN4BRwcABcA2oABTA4AApAAyiAwQFQABpAAQwOgALQADMWUgCuEmNyu15fSIY3gFPAiwPgFFADKIDBAVAAGkABCIACmBqAUAAaQAHMDUCMWkgBuMWw3K43F5LhDeAU8OIAuAmkARTA4AAoAA2gAARAAUwNgLvAGkABDA6Au8AaoKOJuV0vLSTDG8Ap4MUBcBNIAyiAwQFQABpAAQwOgALQAApAABTA1AC4C6wBOhqb23V+IRneAE4BLw6Aa0ANoAAGB0ABaAAFMDgACkADKAABUABTA+AusAboKATAQs4trjV+IYcfuJYCcA6gAATAQk69dFkKQANYyLkFcLIBFIDLQAVwawDsSRrAEWBwAJwCagAFMDgACkADKIDBAVAAGkABCIACmBoAzwXWAApgcADsSRrg0iNACoACEADXgAIwdCFTACykALgGFIAfl0kBAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAPBv/gN+IH8U6YveYgAAAABJRU5ErkJggg==&labelColor=white)](https://aistudio.baidu.com/community/app/518494/webUI)
59
+ [![AI Studio](https://img.shields.io/badge/PP--ChatOCRv4-Demo_on_AI_Studio-1927BA?logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAgAAAAIACAMAAADDpiTIAAAABlBMVEU2P+X///+1KuUwAAAHKklEQVR42u3dS5bjOAwEwALvf2fMavZum6IAImI7b2yYSqU+1Zb//gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADKCR/+fzly7rD92yVg69xh8zeLwOa5w+ZvFYHtc4ft3ykB++cOm79PAp6YO2z/Ngl4ZO5l+9+yT4QAvLqS748VF33Ylzdvzpl72f6z53YIGJ6SZdPeNHcIwOycaADdLgCSIgAIgCOAACAAykIAEAAEAAFAABCAT+WQuQVgeBqXhXQIQAAYegowLQBpbg3gZGFyAC6vgBQAMREA2/YfDPxyaDQNyTNz+3Zwn5J4ZG7PB2h0kHhi7plPCImmJwkPzO0RMa3OET0i5uGlzHFze0xcu0vE2Dq3J4U2vEPgSaHbFzPNDQAAAAAAAMBNovdw+cP/ny+uaf7w/+eYADy8kE+F4Offdjn6zZXhAXgiA78G4MNNsmnu1Xr7b3mbOL8T5Ja5bw/A35EC2LiWpzt1y9jRugBy30fLg3NvHPvnuZcC2NsCUXA/aRmA89V07Fwgt37uH8deCmBr6N44pP4UgaUATpdA7v/cMbIB8okliY65/SW5HhJ1ehPmM+8edwXgpbu4R88FayR32Y/P7oZZbOx13/Zr//ZHx27bAPnkFoyewYlbAhD3TvBobr95gaUAtr1EdNx1lgI4OcTTuR3z6+FZMEDRcu9ZCuDgGCdyGxMa4EgBRMvcjrkM7NgBZw5c0TwAUWUhZwRXA2xaya65Xa3jO2qYZ8bu2AD5w38tG5V8aZpoGN6Tz0bOfa9bceyWAciTO0jWyO1Tc5cLwJmF/JfPnXVyu3/slgHIg1n79O2O5fZv+1cHV7sC2HYqmUdHysNzX3sVkMcjUK5Gc+dMs28E5bGtm0V3gloBOP9vgZv+4sYn3RUaYFMCol5uN77g6lUApc8pWs69Zn7snS9Z9Q8G0S0AUTVUUTG3A54R1KSvo/diLAv5fKzynZeN6xogC75u93+AtBTA47OlAFSv6qY/vp3DAjD8iv2ZdFYJwKynMhTK1rInPfzaxW81LnvSgFP9KxrATaCLA3DxHpbFX31ZyNm5XRZyXG5bNkAWfP0rcrsUwOgC6NIAzgBcBiqAWwPgLrAGuGBP6jr2sifdfiJ6QQM4Bbw4AK4B3129ZSFn53ZZyA/GyFty27IBFMDFAXAG8PbyLQv5xULGPRl0K3h2AbwcgCZPhs+LD1zLnjS6AN4NwMU/DVFh7LyhASreTbvqrxdr/J4XT4Swz4FrTS+AGJ7bNbwAYkxuWzZAVljHrJfbjb9wviYXwFO/FJ8Vli4vaICsEMFyBbA3tmtsAUS0zG1c/bj4YwsZH2/+Whd0+1Nb+S7IE2sfPw4RL0XmsR8Nqvz7qFngmPHF34EqjP15AAofAkosZKPC/K6FVoeP02Ehi540NG6AK/4pYP3cLgVwXwHkDQ1QcSGb/uF4WwCmfX8u/+4vgLINcMUlQIfcLgXwXAF0+BGkpQDuuJx7/hwgpu//cWVuO3wxJOz/z8297vgYBwaIO3O7Kn+c194578ltywbIgu8fl+Z2lS+APvnLjnOv8hsgSqxjgwL4Ln9LAezaj98tgPzy7ZcC+GQzxrWxXQpgx370dm6/H7v6jaBoso5dY1swAFlwHWvfBf5pxVa93fCtdx64+1dsgCy4joWvAfPX9VoKYMs6Zse9/8Mlvv7LILlhAfKFFdsSutJXAdFkL3qlADJPrXFcXAC5KYaH586jO9mtAch9S3T0GQJ726ZWAE49kjP3rlDJu
etdaL/1zeqZY9c7CRz7s0wCUPxienQBnAuAAtAAlxaAAAxfyBQABSAACkAAFIAAKAABUAACMEkKwL170oh7V8ueNLoAjgTAXWAN4BRwcABcA2oABTA4AApAAyiAwQFQABpAAQwOgALQADMWUgCuEmNyu15fSIY3gFPAiwPgFFADKIDBAVAAGkABCIACmBqAUAAaQAHMDUCMWkgBuMWw3K43F5LhDeAU8OIAuAmkARTA4AAoAA2gAARAAUwNgLvAGkABDA6Au8AaoKOJuV0vLSTDG8Ap4MUBcBNIAyiAwQFQABpAAQwOgALQAApAABTA1AC4C6wBOhqb23V+IRneAE4BLw6Aa0ANoAAGB0ABaAAFMDgACkADKAABUABTA+AusAboKATAQs4trjV+IYcfuJYCcA6gAATAQk69dFkKQANYyLkFcLIBFIDLQAVwawDsSRrAEWBwAJwCagAFMDgACkADKIDBAVAAGkABCIACmBoAzwXWAApgcADsSRrg0iNACoACEADXgAIwdCFTACykALgGFIAfl0kBAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAPBv/gN+IH8U6YveYgAAAABJRU5ErkJggg==&labelColor=white)](https://aistudio.baidu.com/community/app/518493/webUI)
60
+
61
+
62
+ - **PaddleOCR-VL - Multilingual Document Parsing via a 0.9B VLM**
63
+ **The SOTA and resource-efficient model tailored for document parsing**, that supports 109 languages and excels in recognizing complex elements (e.g., text, tables, formulas, and charts), while maintaining minimal resource consumption.
64
+
65
+ - **PP-OCRv5 — Universal Scene Text Recognition**
66
+ **Single model supports five text types** (Simplified Chinese, Traditional Chinese, English, Japanese, and Pinyin) with **13% accuracy improvement**. Solves multilingual mixed document recognition challenges.
67
+
68
+ - **PP-StructureV3 — Complex Document Parsing**
69
+ Intelligently converts complex PDFs and document images into **Markdown and JSON files that preserve original structure**. **Outperforms** numerous commercial solutions in public benchmarks. **Perfectly maintains document layout and hierarchical structure**.
70
+
71
+ - **PP-ChatOCRv4 — Intelligent Information Extraction**
72
+ Natively integrates ERNIE 4.5 to **precisely extract key information** from massive documents, with 15% accuracy improvement over previous generation. Makes documents "**understand**" your questions and provide accurate answers.
73
+
74
+ In addition to providing an outstanding model library, PaddleOCR 3.0 also offers user-friendly tools covering model training, inference, and service deployment, so developers can rapidly bring AI applications to production.
75
+ <div align="center">
76
+ <p>
77
+ <img width="100%" src="https://raw.githubusercontent.com/cuicheng01/PaddleX_doc_images/main/images/paddleocr/README/Arch.jpg" alt="PaddleOCR Architecture">
78
+ </p>
79
+ </div>
80
+
81
+ **Special Note**: PaddleOCR 3.x introduces several significant interface changes. **Old code written based on PaddleOCR 2.x is likely incompatible with PaddleOCR 3.x**. Please ensure that the documentation you are reading matches the version of PaddleOCR you are using. [This document](https://paddlepaddle.github.io/PaddleOCR/latest/en/update/upgrade_notes.html) explains the reasons for the upgrade and the major changes from PaddleOCR 2.x to 3.x.
82
+
83
+ ## 📣 Recent updates
84
+
85
+ ### 🔥🔥 2025.10.16: PaddleOCR 3.3.0 released, includes:
86
+
87
+ - Released PaddleOCR-VL:
88
+ - **Model Introduction**:
89
+ - **PaddleOCR-VL** is a SOTA and resource-efficient model tailored for document parsing. Its core component is PaddleOCR-VL-0.9B, a compact yet powerful vision-language model (VLM) that integrates a NaViT-style dynamic resolution visual encoder with the ERNIE-4.5-0.3B language model to enable accurate element recognition. **This innovative model efficiently supports 109 languages and excels in recognizing complex elements (e.g., text, tables, formulas, and charts), while maintaining minimal resource consumption**. Through comprehensive evaluations on widely used public benchmarks and in-house benchmarks, PaddleOCR-VL achieves SOTA performance in both page-level document parsing and element-level recognition. It significantly outperforms existing solutions, exhibits strong competitiveness against top-tier VLMs, and delivers fast inference speeds. These strengths make it highly suitable for practical deployment in real-world scenarios. The model has been released on [HuggingFace](https://huggingface.co/PaddlePaddle/PaddleOCR-VL). Everyone is welcome to download and use it! More introductory information can be found in [PaddleOCR-VL](https://www.paddleocr.ai/latest/version3.x/algorithm/PaddleOCR-VL/PaddleOCR-VL.html).
90
+
91
+ - **Core Features**:
92
+ - **Compact yet Powerful VLM Architecture**: We present a novel vision-language model that is specifically designed for resource-efficient inference, achieving outstanding performance in element recognition. By integrating a NaViT-style dynamic high-resolution visual encoder with the lightweight ERNIE-4.5-0.3B language model, we significantly enhance the model’s recognition capabilities and decoding efficiency. This integration maintains high accuracy while reducing computational demands, making it well-suited for efficient and practical document processing applications.
93
+ - **SOTA Performance on Document Parsing**: PaddleOCR-VL achieves state-of-the-art performance in both page-level document parsing and element-level recognition. It significantly outperforms existing pipeline-based solutions and exhibits strong competitiveness against leading vision-language models (VLMs) in document parsing. Moreover, it excels in recognizing complex document elements, such as text, tables, formulas, and charts, making it suitable for a wide range of challenging content types, including handwritten text and historical documents. This makes it highly versatile and suitable for a wide range of document types and scenarios.
94
+ - **Multilingual Support**: PaddleOCR-VL supports 109 languages, covering major global languages, including but not limited to Chinese, English, Japanese, Latin, and Korean, as well as languages with different scripts and structures, such as Russian (Cyrillic script), Arabic, Hindi (Devanagari script), and Thai. This broad language coverage substantially enhances the applicability of our system to multilingual and globalized document processing scenarios.
95
+
96
+ - Released PP-OCRv5 Multilingual Recognition Model:
97
+ - Improved the accuracy and coverage of Latin script recognition; added support for Cyrillic, Arabic, Devanagari, Telugu, Tamil, and other language systems, covering recognition of 109 languages. The model has only 2M parameters, and the accuracy of some models has increased by over 40% compared to the previous generation.
98
+
99
+
100
+ <details>
101
+ <summary><strong>2025.08.21: Release of PaddleOCR 3.2.0</strong></summary>
102
+
103
+ - **Significant Model Additions:**
104
+ - Introduced training, inference, and deployment for PP-OCRv5 recognition models in English, Thai, and Greek. **The PP-OCRv5 English model delivers an 11% improvement in English scenarios compared to the main PP-OCRv5 model, with the Thai and Greek recognition models achieving accuracies of 82.68% and 89.28%, respectively.**
105
+
106
+ - **Deployment Capability Upgrades:**
107
+ - **Full support for PaddlePaddle framework versions 3.1.0 and 3.1.1.**
108
+ - **Comprehensive upgrade of the PP-OCRv5 C++ local deployment solution, now supporting both Linux and Windows, with feature parity and identical accuracy to the Python implementation.**
109
+ - **High-performance inference now supports CUDA 12, and inference can be performed using either the Paddle Inference or ONNX Runtime backends.**
110
+ - **The high-stability service-oriented deployment solution is now fully open-sourced, allowing users to customize Docker images and SDKs as required.**
111
+ - The high-stability service-oriented deployment solution also supports invocation via manually constructed HTTP requests, enabling client-side code development in any programming language.
112
+
113
+ - **Benchmark Support:**
114
+ - **All production lines now support fine-grained benchmarking, enabling measurement of end-to-end inference time as well as per-layer and per-module latency data to assist with performance analysis. [Here's](docs/version3.x/pipeline_usage/instructions/benchmark.en.md) how to set up and use the benchmark feature.**
115
+ - **Documentation has been updated to include key metrics for commonly used configurations on mainstream hardware, such as inference latency and memory usage, providing deployment references for users.**
116
+
117
+ - **Bug Fixes:**
118
+ - Resolved the issue of failed log saving during model training.
119
+ - Upgraded the data augmentation component for formula models for compatibility with newer versions of the albumentations dependency, and fixed deadlock warnings when using the tokenizers package in multi-process scenarios.
120
+ - Fixed inconsistencies in switch behaviors (e.g., `use_chart_parsing`) in the PP-StructureV3 configuration files compared to other pipelines.
121
+
122
+ - **Other Enhancements:**
123
+ - **Separated core and optional dependencies. Only minimal core dependencies are required for basic text recognition; additional dependencies for document parsing and information extraction can be installed as needed.**
124
+ - **Enabled support for NVIDIA RTX 50 series graphics cards on Windows; users can refer to the [installation guide](docs/version3.x/installation.en.md) for the corresponding PaddlePaddle framework versions.**
125
+ - **PP-OCR series models now support returning single-character coordinates.**
126
+ - Added AIStudio, ModelScope, and other model download sources, allowing users to specify the source for model downloads.
127
+ - Added support for chart-to-table conversion via the PP-Chart2Table module.
128
+ - Optimized documentation descriptions to improve usability.
129
+ </details>
130
+
131
+ <details>
132
+ <summary><strong>2025.08.15: PaddleOCR 3.1.1 Released</strong></summary>
133
+
134
+ - **Bug Fixes:**
135
+ - Added the missing methods `save_vector`, `save_visual_info_list`, `load_vector`, and `load_visual_info_list` in the `PP-ChatOCRv4` class.
136
+ - Added the missing parameters `glossary` and `llm_request_interval` to the `translate` method in the `PPDocTranslation` class.
137
+
138
+ - **Documentation Improvements:**
139
+ - Added a demo to the MCP documentation.
140
+ - Added information about the PaddlePaddle and PaddleOCR versions used for performance metrics testing in the documentation.
141
+ - Fixed errors and omissions in the production line document translation.
142
+
143
+ - **Others:**
144
+ - Changed the MCP server dependency to use the pure Python library `puremagic` instead of `python-magic` to reduce installation issues.
145
+ - Retested PP-OCRv5 performance metrics with PaddleOCR version 3.1.0 and updated the documentation.
146
+
147
+ </details>
148
+
149
+ <details>
150
+ <summary><strong>2025.06.29: PaddleOCR 3.1.0 Released</strong></summary>
151
+
152
+ - **Key Models and Pipelines:**
153
+ - **Added PP-OCRv5 Multilingual Text Recognition Model**, which supports the training and inference process for text recognition models in 37 languages, including French, Spanish, Portuguese, Russian, Korean, etc. **Average accuracy improved by over 30%.** [Details](https://paddlepaddle.github.io/PaddleOCR/latest/en/version3.x/algorithm/PP-OCRv5/PP-OCRv5_multi_languages.html)
154
+ - Upgraded the **PP-Chart2Table model** in PP-StructureV3, further enhancing the capability of converting charts to tables. On internal custom evaluation sets, the metric (RMS-F1) **increased by 9.36 percentage points (71.24% -> 80.60%).**
155
+ - Newly launched **document translation pipeline, PP-DocTranslation, based on PP-StructureV3 and ERNIE 4.5**, which supports the translation of Markdown format documents, various complex-layout PDF documents, and document images, with the results saved as Markdown format documents. [Details](https://paddlepaddle.github.io/PaddleOCR/latest/en/version3.x/pipeline_usage/PP-DocTranslation.html)
156
+
157
+
158
+ - **New MCP server:** [Details](https://paddlepaddle.github.io/PaddleOCR/latest/en/version3.x/deployment/mcp_server.html)
159
+ - **Supports both OCR and PP-StructureV3 pipelines.**
160
+ - Supports three working modes: local Python library, AIStudio Community Cloud Service, and self-hosted service.
161
+ - Supports invoking local services via stdio and remote services via Streamable HTTP.
162
+
163
+ - **Documentation Optimization:** Improved the descriptions in some user guides for a smoother reading experience.
164
+
165
+ </details>
166
+
167
+ <details>
168
+ <summary><strong>2025.06.26: PaddleOCR 3.0.3 Released</strong></summary>
169
+ - Bug Fix: Resolved the issue where the `enable_mkldnn` parameter was not effective, restoring the default behavior of using MKL-DNN for CPU inference.
170
+ </details>
171
+
172
+ <details>
173
+ <summary><strong>2025.06.19: PaddleOCR 3.0.2 Released</strong></summary>
174
+ - **New Features:**
175
+
176
+ - The default download source has been changed from `BOS` to `HuggingFace`. Users can also change the environment variable `PADDLE_PDX_MODEL_SOURCE` to `BOS` to set the model download source back to Baidu Object Storage (BOS).
177
+ - Added service invocation examples for six languages—C++, Java, Go, C#, Node.js, and PHP—for pipelines like PP-OCRv5, PP-StructureV3, and PP-ChatOCRv4.
178
+ - Improved the layout partition sorting algorithm in the PP-StructureV3 pipeline, enhancing the sorting logic for complex vertical layouts to deliver better results.
179
+ - Enhanced model selection logic: when a language is specified but a model version is not, the system will automatically select the latest model version supporting that language.
180
+ - Set a default upper limit for MKL-DNN cache size to prevent unlimited growth, while also allowing users to configure cache capacity.
181
+ - Updated default configurations for high-performance inference to support Paddle MKL-DNN acceleration and optimized the logic for automatic configuration selection for smarter choices.
182
+ - Adjusted the logic for obtaining the default device to consider the actual support for computing devices by the installed Paddle framework, making program behavior more intuitive.
183
+ - Added Android example for PP-OCRv5. [Details](https://paddlepaddle.github.io/PaddleOCR/latest/en/version3.x/deployment/on_device_deployment.html).
184
+
185
+ - **Bug Fixes:**
186
+ - Fixed an issue with some CLI parameters in PP-StructureV3 not taking effect.
187
+ - Resolved an issue where `export_paddlex_config_to_yaml` would not function correctly in certain cases.
188
+ - Corrected the discrepancy between the actual behavior of `save_path` and its documentation description.
189
+ - Fixed potential multithreading errors when using MKL-DNN in basic service deployment.
190
+ - Corrected channel order errors in image preprocessing for the Latex-OCR model.
191
+ - Fixed channel order errors in saving visualized images within the text recognition module.
192
+ - Resolved channel order errors in visualized table results within PP-StructureV3 pipeline.
193
+ - Fixed an overflow issue in the calculation of `overlap_ratio` under extremely special circumstances in the PP-StructureV3 pipeline.
194
+
195
+ - **Documentation Improvements:**
196
+ - Updated the description of the `enable_mkldnn` parameter in the documentation to accurately reflect the program's actual behavior.
197
+ - Fixed errors in the documentation regarding the `lang` and `ocr_version` parameters.
198
+ - Added instructions for exporting pipeline configuration files via CLI.
199
+ - Fixed missing columns in the performance data table for PP-OCRv5.
200
+ - Refined benchmark metrics for PP-StructureV3 across different configurations.
201
+
202
+ - **Others:**
203
+
204
+ - Relaxed version restrictions on dependencies like numpy and pandas, restoring support for Python 3.12.
205
+ </details>
206
+
207
+ <details>
208
+ <summary><strong>History Log</strong></summary>
209
+
210
+ 2025.06.05: **PaddleOCR 3.0.1 Released**, includes:
211
+
212
+ - **Optimization of certain models and model configurations:**
213
+ - Updated the default model configuration for PP-OCRv5, changing both detection and recognition from mobile to server models. To improve default performance in most scenarios, the parameter `limit_side_len` in the configuration has been changed from 736 to 64.
214
+ - Added a new text line orientation classification model `PP-LCNet_x1_0_textline_ori` with an accuracy of 99.42%. The default text line orientation classifier for OCR, PP-StructureV3, and PP-ChatOCRv4 pipelines has been updated to this model.
215
+ - Optimized the text line orientation classification model `PP-LCNet_x0_25_textline_ori`, improving accuracy by 3.3 percentage points to a current accuracy of 98.85%.
216
+ - **Optimizations and fixes for some issues in version 3.0.0, [details](https://paddlepaddle.github.io/PaddleOCR/latest/en/update/update.html)**
217
+
218
+ 🔥🔥2025.05.20: Official Release of **PaddleOCR v3.0**, including:
219
+ - **PP-OCRv5**: High-Accuracy Text Recognition Model for All Scenarios - Instant Text from Images/PDFs.
220
+ 1. 🌐 Single-model support for **five** text types - Seamlessly process **Simplified Chinese, Traditional Chinese, Simplified Chinese Pinyin, English** and **Japanese** within a single model.
221
+ 2. ✍️ Improved **handwriting recognition**: Significantly better at complex cursive scripts and non-standard handwriting.
222
+ 3. 🎯 **13-point accuracy gain** over PP-OCRv4, achieving state-of-the-art performance across a variety of real-world scenarios.
223
+
224
+ - **PP-StructureV3**: General-Purpose Document Parsing – Unleash SOTA Images/PDFs Parsing for Real-World Scenarios!
225
+ 1. 🧮 **High-Accuracy multi-scene PDF parsing**, leading both open- and closed-source solutions on the OmniDocBench benchmark.
226
+ 2. 🧠 Specialized capabilities include **seal recognition**, **chart-to-table conversion**, **table recognition with nested formulas/images**, **vertical text document parsing**, and **complex table structure analysis**.
227
+
228
+ - **PP-ChatOCRv4**: Intelligent Document Understanding – Extract Key Information, not just text from Images/PDFs.
229
+ 1. 🔥 **15-point accuracy gain** in key-information extraction on PDF/PNG/JPG files over the previous generation.
230
+ 2. 💻 Native support for **ERNIE 4.5**, with compatibility for large-model deployments via PaddleNLP, Ollama, vLLM, and more.
231
+ 3. 🤝 Integrated [PP-DocBee2](https://github.com/PaddlePaddle/PaddleMIX/tree/develop/paddlemix/examples/ppdocbee2), enabling extraction and understanding of printed text, handwriting, seals, tables, charts, and other common elements in complex documents.
232
+
233
+ [History Log](https://paddlepaddle.github.io/PaddleOCR/latest/en/update/update.html)
234
+
235
+ </details>
236
+
237
+ ## ⚡ Quick Start
238
+ ### 1. Run online demo
239
+ [![AI Studio](https://img.shields.io/badge/PP_OCRv5-AI_Studio-green)](https://aistudio.baidu.com/community/app/91660/webUI)
240
+ [![AI Studio](https://img.shields.io/badge/PP_StructureV3-AI_Studio-green)](https://aistudio.baidu.com/community/app/518494/webUI)
241
+ [![AI Studio](https://img.shields.io/badge/PP_ChatOCRv4-AI_Studio-green)](https://aistudio.baidu.com/community/app/518493/webUI)
242
+
243
+ ### 2. Installation
244
+
245
+ Install PaddlePaddle by referring to the [Installation Guide](https://www.paddlepaddle.org.cn/en/install/quick?docurl=/documentation/docs/en/develop/install/pip/linux-pip_en.html); after that, install the PaddleOCR toolkit.
246
+
247
+ ```bash
248
+ # If you only want to use the basic text recognition feature (returns text position coordinates and content), including the PP-OCR series
249
+ python -m pip install paddleocr
250
+ # If you want to use all features such as document parsing, document understanding, document translation, key information extraction, etc.
251
+ # python -m pip install "paddleocr[all]"
252
+ ```
253
+
254
+ Starting from version 3.2.0, in addition to the `all` dependency group demonstrated above, PaddleOCR also supports installing partial optional features by specifying other dependency groups. All dependency groups provided by PaddleOCR are as follows:
255
+
256
+ | Dependency Group Name | Corresponding Functionality |
257
+ | - | - |
258
+ | `doc-parser` | Document parsing: can be used to extract layout elements such as tables, formulas, stamps, images, etc. from documents; includes models like PP-StructureV3, PaddleOCR-VL |
259
+ | `ie` | Information extraction: can be used to extract key information from documents, such as names, dates, addresses, amounts, etc.; includes models like PP-ChatOCRv4 |
260
+ | `trans` | Document translation: can be used to translate documents from one language to another; includes models like PP-DocTranslation |
261
+ | `all` | Complete functionality |
262
+
263
+ ### 3. Run inference by CLI
264
+ ```bash
265
+ # Run PP-OCRv5 inference
266
+ paddleocr ocr -i https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_ocr_002.png --use_doc_orientation_classify False --use_doc_unwarping False --use_textline_orientation False
267
+
268
+ # Run PP-StructureV3 inference
269
+ paddleocr pp_structurev3 -i https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/pp_structure_v3_demo.png --use_doc_orientation_classify False --use_doc_unwarping False
270
+
271
+ # Get the Qianfan API Key at first, and then run PP-ChatOCRv4 inference
272
+ paddleocr pp_chatocrv4_doc -i https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/vehicle_certificate-1.png -k 驾驶室准乘人数 --qianfan_api_key your_api_key --use_doc_orientation_classify False --use_doc_unwarping False
273
+
274
+ # Run PaddleOCR-VL inference
275
+ paddleocr doc_parser -i https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/paddleocr_vl_demo.png
276
+
277
+ # Get more information about "paddleocr ocr"
278
+ paddleocr ocr --help
279
+ ```
280
+
281
+ ### 4. Run inference by API
282
+ **4.1 PP-OCRv5 Example**
283
+ ```python
284
+ # Initialize PaddleOCR instance
285
+ from paddleocr import PaddleOCR
286
+ ocr = PaddleOCR(
287
+ use_doc_orientation_classify=False,
288
+ use_doc_unwarping=False,
289
+ use_textline_orientation=False)
290
+
291
+ # Run OCR inference on a sample image
292
+ result = ocr.predict(
293
+ input="https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_ocr_002.png")
294
+
295
+ # Visualize the results and save the JSON results
296
+ for res in result:
297
+ res.print()
298
+ res.save_to_img("output")
299
+ res.save_to_json("output")
300
+ ```
301
+
302
+ <details>
303
+ <summary><strong>4.2 PP-StructureV3 Example</strong></summary>
304
+
305
+ ```python
306
+ from pathlib import Path
307
+ from paddleocr import PPStructureV3
308
+
309
+ pipeline = PPStructureV3(
310
+ use_doc_orientation_classify=False,
311
+ use_doc_unwarping=False
312
+ )
313
+
314
+ # For Image
315
+ output = pipeline.predict(
316
+ input="https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/pp_structure_v3_demo.png",
317
+ )
318
+
319
+ # Visualize the results and save the JSON results
320
+ for res in output:
321
+ res.print()
322
+ res.save_to_json(save_path="output")
323
+ res.save_to_markdown(save_path="output")
324
+ ```
325
+
326
+ </details>
327
+
328
+ <details>
329
+ <summary><strong>4.3 PP-ChatOCRv4 Example</strong></summary>
330
+
331
+ ```python
332
+ from paddleocr import PPChatOCRv4Doc
333
+
334
+ chat_bot_config = {
335
+ "module_name": "chat_bot",
336
+ "model_name": "ernie-3.5-8k",
337
+ "base_url": "https://qianfan.baidubce.com/v2",
338
+ "api_type": "openai",
339
+ "api_key": "api_key", # your api_key
340
+ }
341
+
342
+ retriever_config = {
343
+ "module_name": "retriever",
344
+ "model_name": "embedding-v1",
345
+ "base_url": "https://qianfan.baidubce.com/v2",
346
+ "api_type": "qianfan",
347
+ "api_key": "api_key", # your api_key
348
+ }
349
+
350
+ pipeline = PPChatOCRv4Doc(
351
+ use_doc_orientation_classify=False,
352
+ use_doc_unwarping=False
353
+ )
354
+
355
+ visual_predict_res = pipeline.visual_predict(
356
+ input="https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/vehicle_certificate-1.png",
357
+ use_common_ocr=True,
358
+ use_seal_recognition=True,
359
+ use_table_recognition=True,
360
+ )
361
+
362
+ mllm_predict_info = None
363
+ use_mllm = False
364
+ # If a multimodal large model is used, the local mllm service needs to be started. You can refer to the documentation: https://github.com/PaddlePaddle/PaddleX/blob/release/3.0/docs/pipeline_usage/tutorials/vlm_pipelines/doc_understanding.en.md to perform the deployment and update the mllm_chat_bot_config configuration accordingly.
365
+ if use_mllm:
366
+ mllm_chat_bot_config = {
367
+ "module_name": "chat_bot",
368
+ "model_name": "PP-DocBee",
369
+ "base_url": "http://127.0.0.1:8080/", # your local mllm service url
370
+ "api_type": "openai",
371
+ "api_key": "api_key", # your api_key
372
+ }
373
+
374
+ mllm_predict_res = pipeline.mllm_pred(
375
+ input="https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/vehicle_certificate-1.png",
376
+ key_list=["驾驶室准乘人数"],
377
+ mllm_chat_bot_config=mllm_chat_bot_config,
378
+ )
379
+ mllm_predict_info = mllm_predict_res["mllm_res"]
380
+
381
+ visual_info_list = []
382
+ for res in visual_predict_res:
383
+ visual_info_list.append(res["visual_info"])
384
+ layout_parsing_result = res["layout_parsing_result"]
385
+
386
+ vector_info = pipeline.build_vector(
387
+ visual_info_list, flag_save_bytes_vector=True, retriever_config=retriever_config
388
+ )
389
+ chat_result = pipeline.chat(
390
+ key_list=["驾驶室准乘人数"],
391
+ visual_info=visual_info_list,
392
+ vector_info=vector_info,
393
+ mllm_predict_info=mllm_predict_info,
394
+ chat_bot_config=chat_bot_config,
395
+ retriever_config=retriever_config,
396
+ )
397
+ print(chat_result)
398
+ ```
399
+
400
+ </details>
401
+
402
+ <details>
403
+ <summary><strong>4.4 PaddleOCR-VL Example</strong></summary>
404
+
405
+ ```python
406
+ from paddleocr import PaddleOCRVL
407
+
408
+ pipeline = PaddleOCRVL()
409
+ output = pipeline.predict("https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/paddleocr_vl_demo.png")
410
+ for res in output:
411
+ res.print()
412
+ res.save_to_json(save_path="output")
413
+ res.save_to_markdown(save_path="output")
414
+ ```
415
+
416
+ </details>
417
+
418
+ ### 5. Chinese Heterogeneous AI Accelerators
419
+ - [Huawei Ascend](https://paddlepaddle.github.io/PaddleOCR/latest/version3.x/other_devices_support/paddlepaddle_install_NPU.html)
420
+ - [KUNLUNXIN](https://paddlepaddle.github.io/PaddleOCR/latest/version3.x/other_devices_support/paddlepaddle_install_XPU.html)
421
+
422
+ ## 🧩 More Features
423
+
424
+ - Convert models to ONNX format: [Obtaining ONNX Models](https://paddlepaddle.github.io/PaddleOCR/latest/en/version3.x/deployment/obtaining_onnx_models.html).
425
+ - Accelerate inference using engines like OpenVINO, ONNX Runtime, TensorRT, or perform inference using ONNX format models: [High-Performance Inference](https://paddlepaddle.github.io/PaddleOCR/latest/en/version3.x/deployment/high_performance_inference.html).
426
+ - Accelerate inference using multi-GPU and multi-process: [Parallel Inference for Pipelines](https://paddlepaddle.github.io/PaddleOCR/latest/en/version3.x/pipeline_usage/instructions/parallel_inference.html).
427
+ - Integrate PaddleOCR into applications written in C++, C#, Java, etc.: [Serving](https://paddlepaddle.github.io/PaddleOCR/latest/en/version3.x/deployment/serving.html).
428
+
429
+ ## ⛰️ Advanced Tutorials
430
+
431
+ - [PP-OCRv5 Tutorial](https://paddlepaddle.github.io/PaddleOCR/latest/version3.x/pipeline_usage/OCR.html)
432
+ - [PP-StructureV3 Tutorial](https://paddlepaddle.github.io/PaddleOCR/latest/version3.x/pipeline_usage/PP-StructureV3.html)
433
+ - [PP-ChatOCRv4 Tutorial](https://paddlepaddle.github.io/PaddleOCR/latest/version3.x/pipeline_usage/PP-ChatOCRv4.html)
434
+ - [PaddleOCR-VL Tutorial](https://paddlepaddle.github.io/PaddleOCR/latest/version3.x/pipeline_usage/PaddleOCR-VL.html)
435
+
436
+ ## 🔄 Quick Overview of Execution Results
437
+
438
+ ### PP-OCRv5
439
+
440
+ <div align="center">
441
+ <p>
442
+ <img width="100%" src="https://raw.githubusercontent.com/cuicheng01/PaddleX_doc_images/main/images/paddleocr/README/PP-OCRv5_demo.gif" alt="PP-OCRv5 Demo">
443
+ </p>
444
+ </div>
445
+
446
+
447
+
448
+ ### PP-StructureV3
449
+
450
+ <div align="center">
451
+ <p>
452
+ <img width="100%" src="https://raw.githubusercontent.com/cuicheng01/PaddleX_doc_images/main/images/paddleocr/README/PP-StructureV3_demo.gif" alt="PP-StructureV3 Demo">
453
+ </p>
454
+ </div>
455
+
456
+ ### PaddleOCR-VL
457
+
458
+ <div align="center">
459
+ <p>
460
+ <img width="100%" src="https://raw.githubusercontent.com/cuicheng01/PaddleX_doc_images/main/images/paddleocr/README/PaddleOCR-VL_demo.gif" alt="PaddleOCR-VL Demo">
461
+ </p>
462
+ </div>
463
+
464
+
465
+ ## ✨ Stay Tuned
466
+
467
+ ⭐ **Star this repository to keep up with exciting updates and new releases, including powerful OCR and document parsing capabilities!** ⭐
468
+
469
+ <div align="center">
470
+ <p>
471
+ <img width="1200" src="https://raw.githubusercontent.com/cuicheng01/PaddleX_doc_images/main/images/paddleocr/README/star_paddleocr.en.gif" alt="Star-Project">
472
+ </p>
473
+ </div>
474
+
475
+ ## 👩‍👩‍👧‍👦 Community
476
+
477
+ <div align="center">
478
+
479
+ | PaddlePaddle WeChat official account | Join the tech discussion group |
480
+ | :---: | :---: |
481
+ | <img src="https://raw.githubusercontent.com/cuicheng01/PaddleX_doc_images/refs/heads/main/images/paddleocr/README/qrcode_for_paddlepaddle_official_account.jpg" width="150"> | <img src="https://raw.githubusercontent.com/cuicheng01/PaddleX_doc_images/refs/heads/main/images/paddleocr/README/qr_code_for_the_questionnaire.jpg" width="150"> |
482
+ </div>
483
+
484
+
485
+ ## 😃 Awesome Projects Leveraging PaddleOCR
486
+ PaddleOCR wouldn't be where it is today without its incredible community! 💗 A massive thank you to all our longtime partners, new collaborators, and everyone who's poured their passion into PaddleOCR — whether we've named you or not. Your support fuels our fire!
487
+
488
+ <div align="center">
489
+
490
+ | Project Name | Description |
491
+ | ------------ | ----------- |
492
+ | [RAGFlow](https://github.com/infiniflow/ragflow) <a href="https://github.com/infiniflow/ragflow"><img src="https://img.shields.io/github/stars/infiniflow/ragflow"></a>|RAG engine based on deep document understanding.|
493
+ | [pathway](https://github.com/pathwaycom/pathway) <a href="https://github.com/pathwaycom/pathway"><img src="https://img.shields.io/github/stars/pathwaycom/pathway"></a>|Python ETL framework for stream processing, real-time analytics, LLM pipelines, and RAG.|
494
+ | [MinerU](https://github.com/opendatalab/MinerU) <a href="https://github.com/opendatalab/MinerU"><img src="https://img.shields.io/github/stars/opendatalab/MinerU"></a>|Multi-type Document to Markdown Conversion Tool|
495
+ | [Umi-OCR](https://github.com/hiroi-sora/Umi-OCR) <a href="https://github.com/hiroi-sora/Umi-OCR"><img src="https://img.shields.io/github/stars/hiroi-sora/Umi-OCR"></a>|Free, Open-source, Batch Offline OCR Software.|
496
+ | [cherry-studio](https://github.com/CherryHQ/cherry-studio) <a href="https://github.com/CherryHQ/cherry-studio"><img src="https://img.shields.io/github/stars/CherryHQ/cherry-studio"></a>|A desktop client with support for multiple LLM providers.|
497
+ | [OmniParser](https://github.com/microsoft/OmniParser)<a href="https://github.com/microsoft/OmniParser"><img src="https://img.shields.io/github/stars/microsoft/OmniParser"></a> |OmniParser: Screen Parsing tool for Pure Vision Based GUI Agent.|
498
+ | [QAnything](https://github.com/netease-youdao/QAnything)<a href="https://github.com/netease-youdao/QAnything"><img src="https://img.shields.io/github/stars/netease-youdao/QAnything"></a> |Question and Answer based on Anything.|
499
+ | [PDF-Extract-Kit](https://github.com/opendatalab/PDF-Extract-Kit) <a href="https://github.com/opendatalab/PDF-Extract-Kit"><img src="https://img.shields.io/github/stars/opendatalab/PDF-Extract-Kit"></a>|A powerful open-source toolkit designed to efficiently extract high-quality content from complex and diverse PDF documents.|
500
+ | [Dango-Translator](https://github.com/PantsuDango/Dango-Translator)<a href="https://github.com/PantsuDango/Dango-Translator"><img src="https://img.shields.io/github/stars/PantsuDango/Dango-Translator"></a> |Recognize text on the screen, translate it and show the translation results in real time.|
501
+ | [Learn more projects](./awesome_projects.md) | [More projects based on PaddleOCR](./awesome_projects.md)|
502
+ </div>
503
+
504
+ ## 👩‍👩‍👧‍👦 Contributors
505
+
506
+ <div align="center">
507
+ <a href="https://github.com/PaddlePaddle/PaddleOCR/graphs/contributors">
508
+ <img src="https://contrib.rocks/image?repo=PaddlePaddle/PaddleOCR&max=400&columns=20" width="800"/>
509
+ </a>
510
+ </div>
511
+
512
+ ## 🌟 Star
513
+
514
+ <div align="center">
515
+ <p>
516
+ <img width="800" src="https://api.star-history.com/svg?repos=PaddlePaddle/PaddleOCR&type=Date" alt="Star-history">
517
+ </p>
518
+ </div>
519
+
520
+
521
+ ## 📄 License
522
+ This project is released under the [Apache 2.0 license](LICENSE).
523
+
524
+ ## 🎓 Citation
525
+
526
+ ```bibtex
527
+ @misc{cui2025paddleocr30technicalreport,
528
+ title={PaddleOCR 3.0 Technical Report},
529
+ author={Cheng Cui and Ting Sun and Manhui Lin and Tingquan Gao and Yubo Zhang and Jiaxuan Liu and Xueqing Wang and Zelun Zhang and Changda Zhou and Hongen Liu and Yue Zhang and Wenyu Lv and Kui Huang and Yichao Zhang and Jing Zhang and Jun Zhang and Yi Liu and Dianhai Yu and Yanjun Ma},
530
+ year={2025},
531
+ eprint={2507.05595},
532
+ archivePrefix={arXiv},
533
+ primaryClass={cs.CV},
534
+ url={https://arxiv.org/abs/2507.05595},
535
+ }
536
+
537
+ @misc{cui2025paddleocrvlboostingmultilingualdocument,
538
+ title={PaddleOCR-VL: Boosting Multilingual Document Parsing via a 0.9B Ultra-Compact Vision-Language Model},
539
+ author={Cheng Cui and Ting Sun and Suyin Liang and Tingquan Gao and Zelun Zhang and Jiaxuan Liu and Xueqing Wang and Changda Zhou and Hongen Liu and Manhui Lin and Yue Zhang and Yubo Zhang and Handong Zheng and Jing Zhang and Jun Zhang and Yi Liu and Dianhai Yu and Yanjun Ma},
540
+ year={2025},
541
+ eprint={2510.14528},
542
+ archivePrefix={arXiv},
543
+ primaryClass={cs.CV},
544
+ url={https://arxiv.org/abs/2510.14528},
545
+ }
546
+ ```
applications/README.md ADDED
@@ -0,0 +1 @@
 
 
1
+ 移步[docs](https://www.paddleocr.ai/v2.10.0/applications/overview.html)
awesome_projects.md ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ## 😃 Awesome projects based on PaddleOCR
2
+ 💗 PaddleOCR wouldn’t be where it is today without its incredible community! A massive 🙌 thank you 🙌 to all our longtime partners, new collaborators, and everyone who’s poured their passion into PaddleOCR — whether we’ve named you or not. Your support fuels our fire! 🔥
3
+ | Project Name | Description |
4
+ | ------------ | ----------- |
5
+ | [Umi-OCR](https://github.com/hiroi-sora/Umi-OCR) <a href="https://github.com/hiroi-sora/Umi-OCR"><img src="https://img.shields.io/github/stars/hiroi-sora/Umi-OCR"></a>|Free, Open-source, Batch Offline OCR Software.|
6
+ | [LearnOpenCV](http://github.com/spmallick/learnopencv) <a href="http://github.com/spmallick/learnopencv"><img src="https://img.shields.io/github/stars/spmallick/learnopencv"></a> | Code for Computer Vision, Deep Learning, and AI research articles.|
7
+ | [OmniParser](https://github.com/microsoft/OmniParser)<a href="https://github.com/microsoft/OmniParser"><img src="https://img.shields.io/github/stars/microsoft/OmniParser"></a> |OmniParser: Screen Parsing tool for Pure Vision Based GUI Agent.|
8
+ | [QAnything](https://github.com/netease-youdao/QAnything)<a href="https://github.com/netease-youdao/QAnything"><img src="https://img.shields.io/github/stars/netease-youdao/QAnything"></a> |Question and Answer based on Anything.|
9
+ | [PaddleHub](https://github.com/PaddlePaddle/PaddleHub)<a href="https://github.com/PaddlePaddle/PaddleHub"><img src="https://img.shields.io/github/stars/PaddlePaddle/PaddleHub"></a> |400+ AI Models: Rich, high-quality AI models, including CV, NLP, Speech, Video and Cross-Modal.|
10
+ | [PaddleNLP](https://github.com/PaddlePaddle/PaddleNLP)<a href="https://github.com/PaddlePaddle/PaddleNLP"><img src="https://img.shields.io/github/stars/PaddlePaddle/PaddleNLP"></a> |A Large Language Model (LLM) development suite based on the PaddlePaddle.|
11
+ | [Rerun](https://github.com/rerun-io/rerun) <a href="https://github.com/rerun-io/rerun"><img src="https://img.shields.io/github/stars/rerun-io/rerun"></a> | Rerun is building the multimodal data stack to model, ingest, store, query and view robotics-style data |
12
+ | [Dango-Translator](https://github.com/PantsuDango/Dango-Translator) <a href="https://github.com/PantsuDango/Dango-Translator"><img src="https://img.shields.io/github/stars/PantsuDango/Dango-Translator"></a> | Recognize text on the screen, translate it and show the translation results in real time.|
13
+ | [PDF-Extract-Kit](https://github.com/opendatalab/PDF-Extract-Kit) <a href="https://github.com/opendatalab/PDF-Extract-Kit"><img src="https://img.shields.io/github/stars/opendatalab/PDF-Extract-Kit"></a> | PDF-Extract-Kit is a powerful open-source toolkit designed to efficiently extract high-quality content from complex and diverse PDF documents. |
14
+ | [manga-image-translator](https://github.com/zyddnys/manga-image-translator) <a href="https://github.com/zyddnys/manga-image-translator"><img src="https://img.shields.io/github/stars/zyddnys/manga-image-translator"></a> | Translate texts in manga/images.|
15
+ | [March7thAssistant](https://github.com/moesnow/March7thAssistant) <a href="https://github.com/moesnow/March7thAssistant"><img src="https://img.shields.io/github/stars/moesnow/March7thAssistant"></a> | Daily Tasks: Stamina recovery, daily training, claiming rewards, commissions, and farming. |
16
+ | [PaddlePaddle/models](https://github.com/PaddlePaddle/models) <a href="https://github.com/PaddlePaddle/models"><img src="https://img.shields.io/github/stars/PaddlePaddle/models"></a> |PaddlePaddle's industrial-grade model zoo.|
17
+ | [katanaml/sparrow](https://github.com/katanaml/sparrow) <a href="https://github.com/katanaml/sparrow"><img src="https://img.shields.io/github/stars/katanaml/sparrow"></a> | Sparrow is an innovative open-source solution for efficient data extraction and processing from various documents and images. |
18
+ | [RapidOCR](https://github.com/RapidAI/RapidOCR) <a href="https://github.com/RapidAI/RapidOCR"><img src="https://img.shields.io/github/stars/RapidAI/RapidOCR"></a> | Awesome OCR multiple programming languages toolkits based on ONNXRuntime, OpenVINO, PaddlePaddle and PyTorch |
19
+ | [autoMate](https://github.com/yuruotong1/autoMate) <a href="https://github.com/yuruotong1/autoMate"><img src="https://img.shields.io/github/stars/yuruotong1/autoMate"></a> | AI-Powered Local Automation Tool & Let Your Computer Work for You. |
20
+ | [Agent-S](https://github.com/simular-ai/Agent-S) <a href="https://github.com/simular-ai/Agent-S"><img src="https://img.shields.io/github/stars/simular-ai/Agent-S"></a> | A Compositional Generalist-Specialist Framework for Computer Use Agents. |
21
+ | [pdf-craft](https://github.com/oomol-lab/pdf-craft) <a href="https://github.com/oomol-lab/pdf-craft"><img src="https://img.shields.io/github/stars/oomol-lab/pdf-craft"></a> | PDF Craft can convert PDF files into various other formats. |
22
+ | [VV](https://github.com/Cicada000/VV) <a href="https://github.com/Cicada000/VV"><img src="https://img.shields.io/github/stars/Cicada000/VV"></a> | Zhang Weiwei Quotations Search Project. |
23
+ | [docetl](https://github.com/ucbepic/docetl) <a href="https://github.com/ucbepic/docetl"><img src="https://img.shields.io/github/stars/ucbepic/docetl"></a> | DocETL is a tool for creating and executing data processing pipelines, especially suited for complex document processing tasks. |
24
+ | [ZenlessZoneZero-Auto](https://github.com/sMythicalBird/ZenlessZoneZero-Auto) <a href="https://github.com/sMythicalBird/ZenlessZoneZero-Auto"><img src="https://img.shields.io/github/stars/sMythicalBird/ZenlessZoneZero-Auto"></a> | Zenless Zone Zero Automation Framework. |
25
+ | [Yuxi-Know](https://github.com/xerrors/Yuxi-Know) <a href="https://github.com/xerrors/Yuxi-Know"><img src="https://img.shields.io/github/stars/xerrors/Yuxi-Know"></a> | Knowledge graph question answering system based on LLMs. |
26
+ | [PaddleSharp](https://github.com/sdcb/PaddleSharp) <a href="https://github.com/sdcb/PaddleSharp"><img src="https://img.shields.io/github/stars/sdcb/PaddleSharp"></a>|.NET/C# binding for Baidu paddle inference library and PaddleOCR |
27
+ | [python-office](https://github.com/CoderWanFeng/python-office) <a href="https://github.com/CoderWanFeng/python-office"><img src="https://img.shields.io/github/stars/CoderWanFeng/python-office"></a> | Python tool for office works. |
28
+ | [OnnxOCR](https://github.com/jingsongliujing/OnnxOCR) <a href="https://github.com/jingsongliujing/OnnxOCR"><img src="https://img.shields.io/github/stars/jingsongliujing/OnnxOCR"></a>|A lightweight OCR system based on PaddleOCR, decoupled from the PaddlePaddle deep learning training framework, with ultra-fast inference speed |
29
+ | [Frigate](https://github.com/blakeblackshear/frigate) <a href="https://github.com/blakeblackshear/frigate"><img src="https://img.shields.io/github/stars/blakeblackshear/frigate"></a> | Real-time NVR system with AI-powered object detection and License Plate Recognition (LPR) using PaddleOCR. |
30
+ | ... |... |
configs/cls/ch_PP-OCRv3/ch_PP-OCRv3_rotnet.yml ADDED
@@ -0,0 +1,98 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Global:
2
+ debug: false
3
+ use_gpu: true
4
+ epoch_num: 100
5
+ log_smooth_window: 20
6
+ print_batch_step: 10
7
+ save_model_dir: ./output/rec_ppocr_v3_rotnet
8
+ save_epoch_step: 3
9
+ eval_batch_step: [0, 2000]
10
+ cal_metric_during_train: true
11
+ pretrained_model: null
12
+ checkpoints: null
13
+ save_inference_dir: null
14
+ use_visualdl: false
15
+ infer_img: doc/imgs_words/ch/word_1.jpg
16
+ character_dict_path: ppocr/utils/ppocr_keys_v1.txt
17
+ max_text_length: 25
18
+ infer_mode: false
19
+ use_space_char: true
20
+ save_res_path: ./output/rec/predicts_chinese_lite_v2.0.txt
21
+ Optimizer:
22
+ name: Adam
23
+ beta1: 0.9
24
+ beta2: 0.999
25
+ lr:
26
+ name: Cosine
27
+ learning_rate: 0.001
28
+ regularizer:
29
+ name: L2
30
+ factor: 1.0e-05
31
+ Architecture:
32
+ model_type: cls
33
+ algorithm: CLS
34
+ Transform: null
35
+ Backbone:
36
+ name: MobileNetV1Enhance
37
+ scale: 0.5
38
+ last_conv_stride: [1, 2]
39
+ last_pool_type: avg
40
+ Neck:
41
+ Head:
42
+ name: ClsHead
43
+ class_dim: 4
44
+
45
+ Loss:
46
+ name: ClsLoss
47
+ main_indicator: acc
48
+
49
+ PostProcess:
50
+ name: ClsPostProcess
51
+
52
+ Metric:
53
+ name: ClsMetric
54
+ main_indicator: acc
55
+
56
+ Train:
57
+ dataset:
58
+ name: SimpleDataSet
59
+ data_dir: ./train_data
60
+ label_file_list:
61
+ - ./train_data/train_list.txt
62
+ transforms:
63
+ - DecodeImage:
64
+ img_mode: BGR
65
+ channel_first: false
66
+ - BaseDataAugmentation:
67
+ - RandAugment:
68
+ - SSLRotateResize:
69
+ image_shape: [3, 48, 320]
70
+ - KeepKeys:
71
+ keep_keys: ["image", "label"]
72
+ loader:
73
+ collate_fn: "SSLRotateCollate"
74
+ shuffle: true
75
+ batch_size_per_card: 32
76
+ drop_last: true
77
+ num_workers: 8
78
+ Eval:
79
+ dataset:
80
+ name: SimpleDataSet
81
+ data_dir: ./train_data
82
+ label_file_list:
83
+ - ./train_data/val_list.txt
84
+ transforms:
85
+ - DecodeImage:
86
+ img_mode: BGR
87
+ channel_first: false
88
+ - SSLRotateResize:
89
+ image_shape: [3, 48, 320]
90
+ - KeepKeys:
91
+ keep_keys: ["image", "label"]
92
+ loader:
93
+ collate_fn: "SSLRotateCollate"
94
+ shuffle: false
95
+ drop_last: false
96
+ batch_size_per_card: 64
97
+ num_workers: 8
98
+ profiler_options: null
configs/cls/cls_mv3.yml ADDED
@@ -0,0 +1,94 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Global:
2
+ use_gpu: true
3
+ epoch_num: 100
4
+ log_smooth_window: 20
5
+ print_batch_step: 10
6
+ save_model_dir: ./output/cls/mv3/
7
+ save_epoch_step: 3
8
+ # evaluation is run every 1000 iterations, starting from iteration 0
9
+ eval_batch_step: [0, 1000]
10
+ cal_metric_during_train: True
11
+ pretrained_model:
12
+ checkpoints:
13
+ save_inference_dir:
14
+ use_visualdl: False
15
+ infer_img: doc/imgs_words_en/word_10.png
16
+ label_list: ['0','180']
17
+
18
+ Architecture:
19
+ model_type: cls
20
+ algorithm: CLS
21
+ Transform:
22
+ Backbone:
23
+ name: MobileNetV3
24
+ scale: 0.35
25
+ model_name: small
26
+ Neck:
27
+ Head:
28
+ name: ClsHead
29
+ class_dim: 2
30
+
31
+ Loss:
32
+ name: ClsLoss
33
+
34
+ Optimizer:
35
+ name: Adam
36
+ beta1: 0.9
37
+ beta2: 0.999
38
+ lr:
39
+ name: Cosine
40
+ learning_rate: 0.001
41
+ regularizer:
42
+ name: 'L2'
43
+ factor: 0
44
+
45
+ PostProcess:
46
+ name: ClsPostProcess
47
+
48
+ Metric:
49
+ name: ClsMetric
50
+ main_indicator: acc
51
+
52
+ Train:
53
+ dataset:
54
+ name: SimpleDataSet
55
+ data_dir: ./train_data/cls
56
+ label_file_list:
57
+ - ./train_data/cls/train.txt
58
+ transforms:
59
+ - DecodeImage: # load image
60
+ img_mode: BGR
61
+ channel_first: False
62
+ - ClsLabelEncode: # Class handling label
63
+ - BaseDataAugmentation:
64
+ - RandAugment:
65
+ - ClsResizeImg:
66
+ image_shape: [3, 48, 192]
67
+ - KeepKeys:
68
+ keep_keys: ['image', 'label'] # dataloader will return list in this order
69
+ loader:
70
+ shuffle: True
71
+ batch_size_per_card: 512
72
+ drop_last: True
73
+ num_workers: 8
74
+
75
+ Eval:
76
+ dataset:
77
+ name: SimpleDataSet
78
+ data_dir: ./train_data/cls
79
+ label_file_list:
80
+ - ./train_data/cls/test.txt
81
+ transforms:
82
+ - DecodeImage: # load image
83
+ img_mode: BGR
84
+ channel_first: False
85
+ - ClsLabelEncode: # Class handling label
86
+ - ClsResizeImg:
87
+ image_shape: [3, 48, 192]
88
+ - KeepKeys:
89
+ keep_keys: ['image', 'label'] # dataloader will return list in this order
90
+ loader:
91
+ shuffle: False
92
+ drop_last: False
93
+ batch_size_per_card: 512
94
+ num_workers: 4
configs/det/PP-OCRv3/PP-OCRv3_det_cml.yml ADDED
@@ -0,0 +1,226 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Global:
2
+ debug: false
3
+ use_gpu: true
4
+ epoch_num: 500
5
+ log_smooth_window: 20
6
+ print_batch_step: 10
7
+ save_model_dir: ./output/ch_PP-OCR_v3_det/
8
+ save_epoch_step: 100
9
+ eval_batch_step:
10
+ - 0
11
+ - 400
12
+ cal_metric_during_train: false
13
+ pretrained_model: null
14
+ checkpoints: null
15
+ save_inference_dir: null
16
+ use_visualdl: false
17
+ infer_img: doc/imgs_en/img_10.jpg
18
+ save_res_path: ./checkpoints/det_db/predicts_db.txt
19
+ distributed: true
20
+ d2s_train_image_shape: [3, -1, -1]
21
+ amp_dtype: bfloat16
22
+
23
+ Architecture:
24
+ name: DistillationModel
25
+ algorithm: Distillation
26
+ model_type: det
27
+ Models:
28
+ Student:
29
+ pretrained:
30
+ model_type: det
31
+ algorithm: DB
32
+ Transform: null
33
+ Backbone:
34
+ name: MobileNetV3
35
+ scale: 0.5
36
+ model_name: large
37
+ disable_se: true
38
+ Neck:
39
+ name: RSEFPN
40
+ out_channels: 96
41
+ shortcut: True
42
+ Head:
43
+ name: DBHead
44
+ k: 50
45
+ Student2:
46
+ pretrained:
47
+ model_type: det
48
+ algorithm: DB
49
+ Transform: null
50
+ Backbone:
51
+ name: MobileNetV3
52
+ scale: 0.5
53
+ model_name: large
54
+ disable_se: true
55
+ Neck:
56
+ name: RSEFPN
57
+ out_channels: 96
58
+ shortcut: True
59
+ Head:
60
+ name: DBHead
61
+ k: 50
62
+ Teacher:
63
+ freeze_params: true
64
+ return_all_feats: false
65
+ model_type: det
66
+ algorithm: DB
67
+ Backbone:
68
+ name: ResNet_vd
69
+ in_channels: 3
70
+ layers: 50
71
+ Neck:
72
+ name: LKPAN
73
+ out_channels: 256
74
+ Head:
75
+ name: DBHead
76
+ kernel_list: [7,2,2]
77
+ k: 50
78
+
79
+ Loss:
80
+ name: CombinedLoss
81
+ loss_config_list:
82
+ - DistillationDilaDBLoss:
83
+ weight: 1.0
84
+ model_name_pairs:
85
+ - ["Student", "Teacher"]
86
+ - ["Student2", "Teacher"]
87
+ key: maps
88
+ balance_loss: true
89
+ main_loss_type: DiceLoss
90
+ alpha: 5
91
+ beta: 10
92
+ ohem_ratio: 3
93
+ - DistillationDMLLoss:
94
+ model_name_pairs:
95
+ - ["Student", "Student2"]
96
+ maps_name: "thrink_maps"
97
+ weight: 1.0
98
+ model_name_pairs: ["Student", "Student2"]
99
+ key: maps
100
+ - DistillationDBLoss:
101
+ weight: 1.0
102
+ model_name_list: ["Student", "Student2"]
103
+ balance_loss: true
104
+ main_loss_type: DiceLoss
105
+ alpha: 5
106
+ beta: 10
107
+ ohem_ratio: 3
108
+
109
+ Optimizer:
110
+ name: Adam
111
+ beta1: 0.9
112
+ beta2: 0.999
113
+ lr:
114
+ name: Cosine
115
+ learning_rate: 0.001
116
+ warmup_epoch: 2
117
+ regularizer:
118
+ name: L2
119
+ factor: 5.0e-05
120
+
121
+ PostProcess:
122
+ name: DistillationDBPostProcess
123
+ model_name: ["Student"]
124
+ key: head_out
125
+ thresh: 0.3
126
+ box_thresh: 0.6
127
+ max_candidates: 1000
128
+ unclip_ratio: 1.5
129
+
130
+ Metric:
131
+ name: DistillationMetric
132
+ base_metric_name: DetMetric
133
+ main_indicator: hmean
134
+ key: "Student"
135
+
136
+ Train:
137
+ dataset:
138
+ name: SimpleDataSet
139
+ data_dir: ./train_data/icdar2015/text_localization/
140
+ label_file_list:
141
+ - ./train_data/icdar2015/text_localization/train_icdar2015_label.txt
142
+ ratio_list: [1.0]
143
+ transforms:
144
+ - DecodeImage:
145
+ img_mode: BGR
146
+ channel_first: false
147
+ - DetLabelEncode: null
148
+ - CopyPaste:
149
+ - IaaAugment:
150
+ augmenter_args:
151
+ - type: Fliplr
152
+ args:
153
+ p: 0.5
154
+ - type: Affine
155
+ args:
156
+ rotate:
157
+ - -10
158
+ - 10
159
+ - type: Resize
160
+ args:
161
+ size:
162
+ - 0.5
163
+ - 3
164
+ - EastRandomCropData:
165
+ size:
166
+ - 960
167
+ - 960
168
+ max_tries: 50
169
+ keep_ratio: true
170
+ - MakeBorderMap:
171
+ shrink_ratio: 0.4
172
+ thresh_min: 0.3
173
+ thresh_max: 0.7
174
+ - MakeShrinkMap:
175
+ shrink_ratio: 0.4
176
+ min_text_size: 8
177
+ - NormalizeImage:
178
+ scale: 1./255.
179
+ mean:
180
+ - 0.485
181
+ - 0.456
182
+ - 0.406
183
+ std:
184
+ - 0.229
185
+ - 0.224
186
+ - 0.225
187
+ order: hwc
188
+ - ToCHWImage: null
189
+ - KeepKeys:
190
+ keep_keys:
191
+ - image
192
+ - threshold_map
193
+ - threshold_mask
194
+ - shrink_map
195
+ - shrink_mask
196
+ loader:
197
+ shuffle: true
198
+ drop_last: false
199
+ batch_size_per_card: 8
200
+ num_workers: 4
201
+
202
+ Eval:
203
+ dataset:
204
+ name: SimpleDataSet
205
+ data_dir: ./train_data/icdar2015/text_localization/
206
+ label_file_list:
207
+ - ./train_data/icdar2015/text_localization/test_icdar2015_label.txt
208
+ transforms:
209
+ - DecodeImage: # load image
210
+ img_mode: BGR
211
+ channel_first: False
212
+ - DetLabelEncode: # Class handling label
213
+ - DetResizeForTest:
214
+ - NormalizeImage:
215
+ scale: 1./255.
216
+ mean: [0.485, 0.456, 0.406]
217
+ std: [0.229, 0.224, 0.225]
218
+ order: 'hwc'
219
+ - ToCHWImage:
220
+ - KeepKeys:
221
+ keep_keys: ['image', 'shape', 'polys', 'ignore_tags']
222
+ loader:
223
+ shuffle: False
224
+ drop_last: False
225
+ batch_size_per_card: 1 # must be 1
226
+ num_workers: 2
configs/det/PP-OCRv3/PP-OCRv3_det_dml.yml ADDED
@@ -0,0 +1,174 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Global:
2
+ use_gpu: true
3
+ epoch_num: 1200
4
+ log_smooth_window: 20
5
+ print_batch_step: 2
6
+ save_model_dir: ./output/ch_db_mv3/
7
+ save_epoch_step: 1200
8
+ # evaluation is run every 2000 iterations after the 3000th iteration
9
+ eval_batch_step: [3000, 2000]
10
+ cal_metric_during_train: False
11
+ pretrained_model: ./pretrain_models/MobileNetV3_large_x0_5_pretrained
12
+ checkpoints:
13
+ save_inference_dir:
14
+ use_visualdl: False
15
+ infer_img: doc/imgs_en/img_10.jpg
16
+ save_res_path: ./output/det_db/predicts_db.txt
17
+ d2s_train_image_shape: [3, -1, -1]
18
+
19
+ Architecture:
20
+ name: DistillationModel
21
+ algorithm: Distillation
22
+ model_type: det
23
+ Models:
24
+ Student:
25
+ return_all_feats: false
26
+ model_type: det
27
+ algorithm: DB
28
+ Backbone:
29
+ name: ResNet_vd
30
+ in_channels: 3
31
+ layers: 50
32
+ Neck:
33
+ name: LKPAN
34
+ out_channels: 256
35
+ Head:
36
+ name: DBHead
37
+ kernel_list: [7,2,2]
38
+ k: 50
39
+ Student2:
40
+ return_all_feats: false
41
+ model_type: det
42
+ algorithm: DB
43
+ Backbone:
44
+ name: ResNet_vd
45
+ in_channels: 3
46
+ layers: 50
47
+ Neck:
48
+ name: LKPAN
49
+ out_channels: 256
50
+ Head:
51
+ name: DBHead
52
+ kernel_list: [7,2,2]
53
+ k: 50
54
+
55
+
56
+ Loss:
57
+ name: CombinedLoss
58
+ loss_config_list:
59
+ - DistillationDMLLoss:
60
+ model_name_pairs:
61
+ - ["Student", "Student2"]
62
+ maps_name: "thrink_maps"
63
+ weight: 1.0
64
+ # act: None
65
+ model_name_pairs: ["Student", "Student2"]
66
+ key: maps
67
+ - DistillationDBLoss:
68
+ weight: 1.0
69
+ model_name_list: ["Student", "Student2"]
70
+ # key: maps
71
+ name: DBLoss
72
+ balance_loss: true
73
+ main_loss_type: DiceLoss
74
+ alpha: 5
75
+ beta: 10
76
+ ohem_ratio: 3
77
+
78
+
79
+ Optimizer:
80
+ name: Adam
81
+ beta1: 0.9
82
+ beta2: 0.999
83
+ lr:
84
+ name: Cosine
85
+ learning_rate: 0.001
86
+ warmup_epoch: 2
87
+ regularizer:
88
+ name: 'L2'
89
+ factor: 0
90
+
91
+ PostProcess:
92
+ name: DistillationDBPostProcess
93
+ model_name: ["Student", "Student2"]
94
+ key: head_out
95
+ thresh: 0.3
96
+ box_thresh: 0.6
97
+ max_candidates: 1000
98
+ unclip_ratio: 1.5
99
+
100
+ Metric:
101
+ name: DistillationMetric
102
+ base_metric_name: DetMetric
103
+ main_indicator: hmean
104
+ key: "Student"
105
+
106
+ Train:
107
+ dataset:
108
+ name: SimpleDataSet
109
+ data_dir: ./train_data/icdar2015/text_localization/
110
+ label_file_list:
111
+ - ./train_data/icdar2015/text_localization/train_icdar2015_label.txt
112
+ ratio_list: [1.0]
113
+ transforms:
114
+ - DecodeImage: # load image
115
+ img_mode: BGR
116
+ channel_first: False
117
+ - DetLabelEncode: # Class handling label
118
+ - CopyPaste:
119
+ - IaaAugment:
120
+ augmenter_args:
121
+ - { 'type': Fliplr, 'args': { 'p': 0.5 } }
122
+ - { 'type': Affine, 'args': { 'rotate': [-10, 10] } }
123
+ - { 'type': Resize, 'args': { 'size': [0.5, 3] } }
124
+ - EastRandomCropData:
125
+ size: [960, 960]
126
+ max_tries: 50
127
+ keep_ratio: true
128
+ - MakeBorderMap:
129
+ shrink_ratio: 0.4
130
+ thresh_min: 0.3
131
+ thresh_max: 0.7
132
+ - MakeShrinkMap:
133
+ shrink_ratio: 0.4
134
+ min_text_size: 8
135
+ - NormalizeImage:
136
+ scale: 1./255.
137
+ mean: [0.485, 0.456, 0.406]
138
+ std: [0.229, 0.224, 0.225]
139
+ order: 'hwc'
140
+ - ToCHWImage:
141
+ - KeepKeys:
142
+ keep_keys: ['image', 'threshold_map', 'threshold_mask', 'shrink_map', 'shrink_mask'] # the order of the dataloader list
143
+ loader:
144
+ shuffle: True
145
+ drop_last: False
146
+ batch_size_per_card: 8
147
+ num_workers: 4
148
+
149
+ Eval:
150
+ dataset:
151
+ name: SimpleDataSet
152
+ data_dir: ./train_data/icdar2015/text_localization/
153
+ label_file_list:
154
+ - ./train_data/icdar2015/text_localization/test_icdar2015_label.txt
155
+ transforms:
156
+ - DecodeImage: # load image
157
+ img_mode: BGR
158
+ channel_first: False
159
+ - DetLabelEncode: # Class handling label
160
+ - DetResizeForTest:
161
+ # image_shape: [736, 1280]
162
+ - NormalizeImage:
163
+ scale: 1./255.
164
+ mean: [0.485, 0.456, 0.406]
165
+ std: [0.229, 0.224, 0.225]
166
+ order: 'hwc'
167
+ - ToCHWImage:
168
+ - KeepKeys:
169
+ keep_keys: ['image', 'shape', 'polys', 'ignore_tags']
170
+ loader:
171
+ shuffle: False
172
+ drop_last: False
173
+ batch_size_per_card: 1 # must be 1
174
+ num_workers: 2
configs/det/PP-OCRv3/PP-OCRv3_mobile_det.yml ADDED
@@ -0,0 +1,165 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Global:
2
+ model_name: PP-OCRv3_mobile_det # To use static model for inference.
3
+ debug: false
4
+ use_gpu: true
5
+ epoch_num: 500
6
+ log_smooth_window: 20
7
+ print_batch_step: 10
8
+ save_model_dir: ./output/PP-OCRv3_mobile_det/
9
+ save_epoch_step: 100
10
+ eval_batch_step:
11
+ - 0
12
+ - 400
13
+ cal_metric_during_train: false
14
+ pretrained_model: https://paddleocr.bj.bcebos.com/pretrained/MobileNetV3_large_x0_5_pretrained.pdparams
15
+ checkpoints: null
16
+ save_inference_dir: null
17
+ use_visualdl: false
18
+ infer_img: doc/imgs_en/img_10.jpg
19
+ save_res_path: ./checkpoints/det_db/predicts_db.txt
20
+ distributed: true
21
+ d2s_train_image_shape: [3, -1, -1]
22
+
23
+ Architecture:
24
+ model_type: det
25
+ algorithm: DB
26
+ Transform:
27
+ Backbone:
28
+ name: MobileNetV3
29
+ scale: 0.5
30
+ model_name: large
31
+ disable_se: True
32
+ Neck:
33
+ name: RSEFPN
34
+ out_channels: 96
35
+ shortcut: True
36
+ Head:
37
+ name: DBHead
38
+ k: 50
39
+
40
+ Loss:
41
+ name: DBLoss
42
+ balance_loss: true
43
+ main_loss_type: DiceLoss
44
+ alpha: 5
45
+ beta: 10
46
+ ohem_ratio: 3
47
+ Optimizer:
48
+ name: Adam
49
+ beta1: 0.9
50
+ beta2: 0.999
51
+ lr:
52
+ name: Cosine
53
+ learning_rate: 0.001
54
+ warmup_epoch: 2
55
+ regularizer:
56
+ name: L2
57
+ factor: 5.0e-05
58
+ PostProcess:
59
+ name: DBPostProcess
60
+ thresh: 0.3
61
+ box_thresh: 0.6
62
+ max_candidates: 1000
63
+ unclip_ratio: 1.5
64
+ Metric:
65
+ name: DetMetric
66
+ main_indicator: hmean
67
+ Train:
68
+ dataset:
69
+ name: SimpleDataSet
70
+ data_dir: ./train_data/icdar2015/text_localization/
71
+ label_file_list:
72
+ - ./train_data/icdar2015/text_localization/train_icdar2015_label.txt
73
+ ratio_list: [1.0]
74
+ transforms:
75
+ - DecodeImage:
76
+ img_mode: BGR
77
+ channel_first: false
78
+ - DetLabelEncode: null
79
+ - IaaAugment:
80
+ augmenter_args:
81
+ - type: Fliplr
82
+ args:
83
+ p: 0.5
84
+ - type: Affine
85
+ args:
86
+ rotate:
87
+ - -10
88
+ - 10
89
+ - type: Resize
90
+ args:
91
+ size:
92
+ - 0.5
93
+ - 3
94
+ - EastRandomCropData:
95
+ size:
96
+ - 960
97
+ - 960
98
+ max_tries: 50
99
+ keep_ratio: true
100
+ - MakeBorderMap:
101
+ shrink_ratio: 0.4
102
+ thresh_min: 0.3
103
+ thresh_max: 0.7
104
+ - MakeShrinkMap:
105
+ shrink_ratio: 0.4
106
+ min_text_size: 8
107
+ - NormalizeImage:
108
+ scale: 1./255.
109
+ mean:
110
+ - 0.485
111
+ - 0.456
112
+ - 0.406
113
+ std:
114
+ - 0.229
115
+ - 0.224
116
+ - 0.225
117
+ order: hwc
118
+ - ToCHWImage: null
119
+ - KeepKeys:
120
+ keep_keys:
121
+ - image
122
+ - threshold_map
123
+ - threshold_mask
124
+ - shrink_map
125
+ - shrink_mask
126
+ loader:
127
+ shuffle: true
128
+ drop_last: false
129
+ batch_size_per_card: 8
130
+ num_workers: 4
131
+ Eval:
132
+ dataset:
133
+ name: SimpleDataSet
134
+ data_dir: ./train_data/icdar2015/text_localization/
135
+ label_file_list:
136
+ - ./train_data/icdar2015/text_localization/test_icdar2015_label.txt
137
+ transforms:
138
+ - DecodeImage:
139
+ img_mode: BGR
140
+ channel_first: false
141
+ - DetLabelEncode: null
142
+ - DetResizeForTest: null
143
+ - NormalizeImage:
144
+ scale: 1./255.
145
+ mean:
146
+ - 0.485
147
+ - 0.456
148
+ - 0.406
149
+ std:
150
+ - 0.229
151
+ - 0.224
152
+ - 0.225
153
+ order: hwc
154
+ - ToCHWImage: null
155
+ - KeepKeys:
156
+ keep_keys:
157
+ - image
158
+ - shape
159
+ - polys
160
+ - ignore_tags
161
+ loader:
162
+ shuffle: false
163
+ drop_last: false
164
+ batch_size_per_card: 1
165
+ num_workers: 2
configs/det/PP-OCRv3/PP-OCRv3_server_det.yml ADDED
@@ -0,0 +1,160 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Global:
2
+ model_name: PP-OCRv3_server_det # To use static model for inference.
3
+ debug: false
4
+ use_gpu: true
5
+ epoch_num: 500
6
+ log_smooth_window: 20
7
+ print_batch_step: 10
8
+ save_model_dir: ./output/PP-OCRv3_server_det/
9
+ save_epoch_step: 100
10
+ eval_batch_step:
11
+ - 0
12
+ - 400
13
+ cal_metric_during_train: false
14
+ pretrained_model: https://paddleocr.bj.bcebos.com/pretrained/ResNet50_vd_ssld_pretrained.pdparams
15
+ checkpoints: null
16
+ save_inference_dir: null
17
+ use_visualdl: false
18
+ infer_img: doc/imgs_en/img_10.jpg
19
+ save_res_path: ./checkpoints/det_db/predicts_db.txt
20
+ distributed: true
21
+ d2s_train_image_shape: [3, -1, -1]
22
+ amp_dtype: bfloat16
23
+
24
+ Architecture:
25
+ model_type: det
26
+ algorithm: DB
27
+ Backbone:
28
+ name: ResNet_vd
29
+ in_channels: 3
30
+ layers: 50
31
+ Neck:
32
+ name: LKPAN
33
+ out_channels: 256
34
+ Head:
35
+ name: DBHead
36
+ kernel_list: [7,2,2]
37
+ k: 50
38
+
39
+
40
+ Loss:
41
+ name: DBLoss
42
+ balance_loss: true
43
+ main_loss_type: DiceLoss
44
+ alpha: 5
45
+ beta: 10
46
+ ohem_ratio: 3
47
+ Optimizer:
48
+ name: Adam
49
+ beta1: 0.9
50
+ beta2: 0.999
51
+ lr:
52
+ name: Cosine
53
+ learning_rate: 0.001
54
+ warmup_epoch: 2
55
+ regularizer:
56
+ name: L2
57
+ factor: 5.0e-05
58
+
59
+ PostProcess:
60
+ name: DBPostProcess
61
+ thresh: 0.3
62
+ box_thresh: 0.6
63
+ max_candidates: 1000
64
+ unclip_ratio: 1.5
65
+
66
+ Metric:
67
+ name: DetMetric
68
+ main_indicator: hmean
69
+
70
+ Train:
71
+ dataset:
72
+ name: SimpleDataSet
73
+ data_dir: ./train_data/icdar2015/text_localization/
74
+ label_file_list:
75
+ - ./train_data/icdar2015/text_localization/train_icdar2015_label.txt
76
+ transforms:
77
+ - DecodeImage:
78
+ img_mode: BGR
79
+ channel_first: false
80
+ - DetLabelEncode: null
81
+ - CopyPaste:
82
+ - IaaAugment:
83
+ augmenter_args:
84
+ - type: Fliplr
85
+ args:
86
+ p: 0.5
87
+ - type: Affine
88
+ args:
89
+ rotate:
90
+ - -10
91
+ - 10
92
+ - type: Resize
93
+ args:
94
+ size:
95
+ - 0.5
96
+ - 3
97
+ - EastRandomCropData:
98
+ size:
99
+ - 960
100
+ - 960
101
+ max_tries: 50
102
+ keep_ratio: true
103
+ - MakeBorderMap:
104
+ shrink_ratio: 0.4
105
+ thresh_min: 0.3
106
+ thresh_max: 0.7
107
+ - MakeShrinkMap:
108
+ shrink_ratio: 0.4
109
+ min_text_size: 8
110
+ - NormalizeImage:
111
+ scale: 1./255.
112
+ mean:
113
+ - 0.485
114
+ - 0.456
115
+ - 0.406
116
+ std:
117
+ - 0.229
118
+ - 0.224
119
+ - 0.225
120
+ order: hwc
121
+ - ToCHWImage: null
122
+ - KeepKeys:
123
+ keep_keys:
124
+ - image
125
+ - threshold_map
126
+ - threshold_mask
127
+ - shrink_map
128
+ - shrink_mask
129
+ loader:
130
+ shuffle: true
131
+ drop_last: false
132
+ batch_size_per_card: 8
133
+ num_workers: 4
134
+
135
+ Eval:
136
+ dataset:
137
+ name: SimpleDataSet
138
+ data_dir: ./train_data/icdar2015/text_localization/
139
+ label_file_list:
140
+ - ./train_data/icdar2015/text_localization/test_icdar2015_label.txt
141
+ transforms:
142
+ - DecodeImage: # load image
143
+ img_mode: BGR
144
+ channel_first: False
145
+ - DetLabelEncode: # Class handling label
146
+ - DetResizeForTest:
147
+ - NormalizeImage:
148
+ scale: 1./255.
149
+ mean: [0.485, 0.456, 0.406]
150
+ std: [0.229, 0.224, 0.225]
151
+ order: 'hwc'
152
+ - ToCHWImage:
153
+ - KeepKeys:
154
+ keep_keys: ['image', 'shape', 'polys', 'ignore_tags']
155
+ loader:
156
+ shuffle: False
157
+ drop_last: False
158
+ batch_size_per_card: 1 # must be 1
159
+ num_workers: 2
160
+ profiler_options: null
configs/det/PP-OCRv4/PP-OCRv4_det_cml.yml ADDED
@@ -0,0 +1,240 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Global:
2
+ debug: false
3
+ use_gpu: true
4
+ epoch_num: 500
5
+ log_smooth_window: 20
6
+ print_batch_step: 20
7
+ save_model_dir: ./output/ch_PP-OCRv4
8
+ save_epoch_step: 50
9
+ eval_batch_step:
10
+ - 0
11
+ - 1000
12
+ cal_metric_during_train: false
13
+ checkpoints: null
14
+ pretrained_model: null
15
+ save_inference_dir: null
16
+ use_visualdl: false
17
+ infer_img: doc/imgs_en/img_10.jpg
18
+ save_res_path: ./checkpoints/det_db/predicts_db.txt
19
+ d2s_train_image_shape: [3, 640, 640]
20
+ distributed: true
21
+ Architecture:
22
+ name: DistillationModel
23
+ algorithm: Distillation
24
+ model_type: det
25
+ Models:
26
+ Student:
27
+ model_type: det
28
+ algorithm: DB
29
+ Transform: null
30
+ Backbone:
31
+ name: PPLCNetV3
32
+ scale: 0.75
33
+ pretrained: false
34
+ det: true
35
+ Neck:
36
+ name: RSEFPN
37
+ out_channels: 96
38
+ shortcut: true
39
+ Head:
40
+ name: DBHead
41
+ k: 50
42
+ Student2:
43
+ pretrained: null
44
+ model_type: det
45
+ algorithm: DB
46
+ Transform: null
47
+ Backbone:
48
+ name: PPLCNetV3
49
+ scale: 0.75
50
+ pretrained: true
51
+ det: true
52
+ Neck:
53
+ name: RSEFPN
54
+ out_channels: 96
55
+ shortcut: true
56
+ Head:
57
+ name: DBHead
58
+ k: 50
59
+ Teacher:
60
+ pretrained: https://paddleocr.bj.bcebos.com/PP-OCRv4/chinese/ch_PP-OCRv4_det_cml_teacher_pretrained/teacher.pdparams
61
+ freeze_params: true
62
+ return_all_feats: false
63
+ model_type: det
64
+ algorithm: DB
65
+ Backbone:
66
+ name: ResNet_vd
67
+ in_channels: 3
68
+ layers: 50
69
+ Neck:
70
+ name: LKPAN
71
+ out_channels: 256
72
+ Head:
73
+ name: DBHead
74
+ kernel_list:
75
+ - 7
76
+ - 2
77
+ - 2
78
+ k: 50
79
+ Loss:
80
+ name: CombinedLoss
81
+ loss_config_list:
82
+ - DistillationDilaDBLoss:
83
+ weight: 1.0
84
+ model_name_pairs:
85
+ - - Student
86
+ - Teacher
87
+ - - Student2
88
+ - Teacher
89
+ key: maps
90
+ balance_loss: true
91
+ main_loss_type: DiceLoss
92
+ alpha: 5
93
+ beta: 10
94
+ ohem_ratio: 3
95
+ - DistillationDMLLoss:
96
+ model_name_pairs:
97
+ - Student
98
+ - Student2
99
+ maps_name: thrink_maps
100
+ weight: 1.0
101
+ key: maps
102
+ - DistillationDBLoss:
103
+ weight: 1.0
104
+ model_name_list:
105
+ - Student
106
+ - Student2
107
+ balance_loss: true
108
+ main_loss_type: DiceLoss
109
+ alpha: 5
110
+ beta: 10
111
+ ohem_ratio: 3
112
+ Optimizer:
113
+ name: Adam
114
+ beta1: 0.9
115
+ beta2: 0.999
116
+ lr:
117
+ name: Cosine
118
+ learning_rate: 0.001
119
+ warmup_epoch: 2
120
+ regularizer:
121
+ name: L2
122
+ factor: 5.0e-05
123
+ PostProcess:
124
+ name: DistillationDBPostProcess
125
+ model_name:
126
+ - Student
127
+ key: head_out
128
+ thresh: 0.3
129
+ box_thresh: 0.6
130
+ max_candidates: 1000
131
+ unclip_ratio: 1.5
132
+ Metric:
133
+ name: DistillationMetric
134
+ base_metric_name: DetMetric
135
+ main_indicator: hmean
136
+ key: Student
137
+ Train:
138
+ dataset:
139
+ name: SimpleDataSet
140
+ data_dir: ./train_data/icdar2015/text_localization/
141
+ label_file_list:
142
+ - ./train_data/icdar2015/text_localization/train_icdar2015_label.txt
143
+ ratio_list: [1.0]
144
+ transforms:
145
+ - DecodeImage:
146
+ img_mode: BGR
147
+ channel_first: false
148
+ - DetLabelEncode: null
149
+ - IaaAugment:
150
+ augmenter_args:
151
+ - type: Fliplr
152
+ args:
153
+ p: 0.5
154
+ - type: Affine
155
+ args:
156
+ rotate:
157
+ - -10
158
+ - 10
159
+ - type: Resize
160
+ args:
161
+ size:
162
+ - 0.5
163
+ - 3
164
+ - EastRandomCropData:
165
+ size:
166
+ - 640
167
+ - 640
168
+ max_tries: 50
169
+ keep_ratio: true
170
+ - MakeBorderMap:
171
+ shrink_ratio: 0.4
172
+ thresh_min: 0.3
173
+ thresh_max: 0.7
174
+ total_epoch: 500
175
+ - MakeShrinkMap:
176
+ shrink_ratio: 0.4
177
+ min_text_size: 8
178
+ total_epoch: 500
179
+ - NormalizeImage:
180
+ scale: 1./255.
181
+ mean:
182
+ - 0.485
183
+ - 0.456
184
+ - 0.406
185
+ std:
186
+ - 0.229
187
+ - 0.224
188
+ - 0.225
189
+ order: hwc
190
+ - ToCHWImage: null
191
+ - KeepKeys:
192
+ keep_keys:
193
+ - image
194
+ - threshold_map
195
+ - threshold_mask
196
+ - shrink_map
197
+ - shrink_mask
198
+ loader:
199
+ shuffle: true
200
+ drop_last: false
201
+ batch_size_per_card: 16
202
+ num_workers: 8
203
+ Eval:
204
+ dataset:
205
+ name: SimpleDataSet
206
+ data_dir: ./train_data/icdar2015/text_localization/
207
+ label_file_list:
208
+ - ./train_data/icdar2015/text_localization/test_icdar2015_label.txt
209
+ transforms:
210
+ - DecodeImage:
211
+ img_mode: BGR
212
+ channel_first: false
213
+ - DetLabelEncode: null
214
+ - DetResizeForTest:
215
+ limit_side_len: 960
216
+ limit_type: max
217
+ - NormalizeImage:
218
+ scale: 1./255.
219
+ mean:
220
+ - 0.485
221
+ - 0.456
222
+ - 0.406
223
+ std:
224
+ - 0.229
225
+ - 0.224
226
+ - 0.225
227
+ order: hwc
228
+ - ToCHWImage: null
229
+ - KeepKeys:
230
+ keep_keys:
231
+ - image
232
+ - shape
233
+ - polys
234
+ - ignore_tags
235
+ loader:
236
+ shuffle: false
237
+ drop_last: false
238
+ batch_size_per_card: 1
239
+ num_workers: 2
240
+ profiler_options: null
configs/det/PP-OCRv4/PP-OCRv4_mobile_det.yml ADDED
@@ -0,0 +1,174 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Global:
2
+ model_name: PP-OCRv4_mobile_det # To use static model for inference.
3
+ debug: false
4
+ use_gpu: true
5
+ epoch_num: &epoch_num 500
6
+ log_smooth_window: 20
7
+ print_batch_step: 100
8
+ save_model_dir: ./output/PP-OCRv4_mobile_det
9
+ save_epoch_step: 10
10
+ eval_batch_step:
11
+ - 0
12
+ - 1500
13
+ cal_metric_during_train: false
14
+ checkpoints:
15
+ pretrained_model: https://paddleocr.bj.bcebos.com/pretrained/PPLCNetV3_x0_75_ocr_det.pdparams
16
+ save_inference_dir: null
17
+ use_visualdl: false
18
+ infer_img: doc/imgs_en/img_10.jpg
19
+ save_res_path: ./checkpoints/det_db/predicts_db.txt
20
+ d2s_train_image_shape: [3, 640, 640]
21
+ distributed: true
22
+
23
+ Architecture:
24
+ model_type: det
25
+ algorithm: DB
26
+ Transform: null
27
+ Backbone:
28
+ name: PPLCNetV3
29
+ scale: 0.75
30
+ det: True
31
+ Neck:
32
+ name: RSEFPN
33
+ out_channels: 96
34
+ shortcut: True
35
+ Head:
36
+ name: DBHead
37
+ k: 50
38
+ fix_nan: True
39
+
40
+ Loss:
41
+ name: DBLoss
42
+ balance_loss: true
43
+ main_loss_type: DiceLoss
44
+ alpha: 5
45
+ beta: 10
46
+ ohem_ratio: 3
47
+
48
+ Optimizer:
49
+ name: Adam
50
+ beta1: 0.9
51
+ beta2: 0.999
52
+ lr:
53
+ name: Cosine
54
+ learning_rate: 0.001 #(8*8c)
55
+ warmup_epoch: 2
56
+ regularizer:
57
+ name: L2
58
+ factor: 5.0e-05
59
+
60
+ PostProcess:
61
+ name: DBPostProcess
62
+ thresh: 0.3
63
+ box_thresh: 0.6
64
+ max_candidates: 1000
65
+ unclip_ratio: 1.5
66
+
67
+ Metric:
68
+ name: DetMetric
69
+ main_indicator: hmean
70
+
71
+ Train:
72
+ dataset:
73
+ name: SimpleDataSet
74
+ data_dir: ./train_data/icdar2015/text_localization/
75
+ label_file_list:
76
+ - ./train_data/icdar2015/text_localization/train_icdar2015_label.txt
77
+ ratio_list: [1.0]
78
+ transforms:
79
+ - DecodeImage:
80
+ img_mode: BGR
81
+ channel_first: false
82
+ - DetLabelEncode: null
83
+ - CopyPaste: null
84
+ - IaaAugment:
85
+ augmenter_args:
86
+ - type: Fliplr
87
+ args:
88
+ p: 0.5
89
+ - type: Affine
90
+ args:
91
+ rotate:
92
+ - -10
93
+ - 10
94
+ - type: Resize
95
+ args:
96
+ size:
97
+ - 0.5
98
+ - 3
99
+ - EastRandomCropData:
100
+ size:
101
+ - 640
102
+ - 640
103
+ max_tries: 50
104
+ keep_ratio: true
105
+ - MakeBorderMap:
106
+ shrink_ratio: 0.4
107
+ thresh_min: 0.3
108
+ thresh_max: 0.7
109
+ total_epoch: *epoch_num
110
+ - MakeShrinkMap:
111
+ shrink_ratio: 0.4
112
+ min_text_size: 8
113
+ total_epoch: *epoch_num
114
+ - NormalizeImage:
115
+ scale: 1./255.
116
+ mean:
117
+ - 0.485
118
+ - 0.456
119
+ - 0.406
120
+ std:
121
+ - 0.229
122
+ - 0.224
123
+ - 0.225
124
+ order: hwc
125
+ - ToCHWImage: null
126
+ - KeepKeys:
127
+ keep_keys:
128
+ - image
129
+ - threshold_map
130
+ - threshold_mask
131
+ - shrink_map
132
+ - shrink_mask
133
+ loader:
134
+ shuffle: true
135
+ drop_last: false
136
+ batch_size_per_card: 8
137
+ num_workers: 8
138
+
139
+ Eval:
140
+ dataset:
141
+ name: SimpleDataSet
142
+ data_dir: ./train_data/icdar2015/text_localization/
143
+ label_file_list:
144
+ - ./train_data/icdar2015/text_localization/test_icdar2015_label.txt
145
+ transforms:
146
+ - DecodeImage:
147
+ img_mode: BGR
148
+ channel_first: false
149
+ - DetLabelEncode: null
150
+ - DetResizeForTest:
151
+ - NormalizeImage:
152
+ scale: 1./255.
153
+ mean:
154
+ - 0.485
155
+ - 0.456
156
+ - 0.406
157
+ std:
158
+ - 0.229
159
+ - 0.224
160
+ - 0.225
161
+ order: hwc
162
+ - ToCHWImage: null
163
+ - KeepKeys:
164
+ keep_keys:
165
+ - image
166
+ - shape
167
+ - polys
168
+ - ignore_tags
169
+ loader:
170
+ shuffle: false
171
+ drop_last: false
172
+ batch_size_per_card: 1
173
+ num_workers: 2
174
+ profiler_options: null
configs/det/PP-OCRv4/PP-OCRv4_mobile_seal_det.yml ADDED
@@ -0,0 +1,171 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Global:
2
+ model_name: PP-OCRv4_mobile_seal_det # To use static model for inference.
3
+ debug: false
4
+ use_gpu: true
5
+ epoch_num: 100
6
+ log_smooth_window: 20
7
+ print_batch_step: 10
8
+ save_model_dir: output
9
+ save_epoch_step: 1
10
+ eval_batch_step:
11
+ - 0
12
+ - 100
13
+ cal_metric_during_train: false
14
+ checkpoints:
15
+ pretrained_model: https://paddleocr.bj.bcebos.com/pretrained/PPLCNetV3_x0_75_ocr_det.pdparams
16
+ save_inference_dir: null
17
+ use_visualdl: false
18
+ distributed: true
19
+ d2s_train_image_shape: [3, 640, 640]
20
+
21
+ Architecture:
22
+ model_type: det
23
+ algorithm: DB
24
+ Transform: null
25
+ Backbone:
26
+ name: PPLCNetV3
27
+ scale: 0.75
28
+ det: True
29
+ Neck:
30
+ name: RSEFPN
31
+ out_channels: 96
32
+ shortcut: True
33
+ Head:
34
+ name: DBHead
35
+ k: 50
36
+
37
+ Loss:
38
+ name: DBLoss
39
+ balance_loss: true
40
+ main_loss_type: DiceLoss
41
+ alpha: 5
42
+ beta: 10
43
+ ohem_ratio: 3
44
+
45
+ Optimizer:
46
+ name: Adam
47
+ beta1: 0.9
48
+ beta2: 0.999
49
+ lr:
50
+ name: Cosine
51
+ learning_rate: 0.001
52
+ warmup_epoch: 2
53
+ regularizer:
54
+ name: L2
55
+ factor: 1e-6
56
+
57
+ PostProcess:
58
+ name: DBPostProcess
59
+ thresh: 0.2
60
+ box_thresh: 0.6
61
+ max_candidates: 1000
62
+ unclip_ratio: 0.5
63
+ box_type: "poly"
64
+
65
+ Metric:
66
+ name: DetMetric
67
+ main_indicator: hmean
68
+
69
+ Train:
70
+ dataset:
71
+ name: TextDetDataset
72
+ data_dir: datasets/ICDAR2015
73
+ label_file_list:
74
+ - datasets/ICDAR2015/train.txt
75
+ transforms:
76
+ - DecodeImage:
77
+ img_mode: BGR
78
+ channel_first: false
79
+ - DetLabelEncode: null
80
+ - IaaAugment:
81
+ augmenter_args:
82
+ - type: Fliplr
83
+ args:
84
+ p: 0.5
85
+ - type: Affine
86
+ args:
87
+ rotate:
88
+ - -10
89
+ - 10
90
+ - type: Resize
91
+ args:
92
+ size:
93
+ - 0.5
94
+ - 3
95
+ - EastRandomCropData:
96
+ size:
97
+ - 640
98
+ - 640
99
+ max_tries: 50
100
+ keep_ratio: true
101
+ - MakeBorderMap:
102
+ shrink_ratio: 0.8
103
+ thresh_min: 0.3
104
+ thresh_max: 0.7
105
+ total_epoch: 500
106
+ - MakeShrinkMap:
107
+ shrink_ratio: 0.8
108
+ min_text_size: 8
109
+ total_epoch: 500
110
+ - NormalizeImage:
111
+ scale: 1./255.
112
+ mean:
113
+ - 0.485
114
+ - 0.456
115
+ - 0.406
116
+ std:
117
+ - 0.229
118
+ - 0.224
119
+ - 0.225
120
+ order: hwc
121
+ - ToCHWImage: null
122
+ - KeepKeys:
123
+ keep_keys:
124
+ - image
125
+ - threshold_map
126
+ - threshold_mask
127
+ - shrink_map
128
+ - shrink_mask
129
+ loader:
130
+ shuffle: true
131
+ drop_last: false
132
+ batch_size_per_card: 8
133
+ num_workers: 3
134
+
135
+ Eval:
136
+ dataset:
137
+ name: TextDetDataset
138
+ data_dir: datasets/ICDAR2015
139
+ label_file_list:
140
+ - datasets/ICDAR2015/val.txt
141
+ transforms:
142
+ - DecodeImage:
143
+ img_mode: BGR
144
+ channel_first: false
145
+ - DetLabelEncode: null
146
+ - DetResizeForTest:
147
+ resize_long: 736
148
+ - NormalizeImage:
149
+ scale: 1./255.
150
+ mean:
151
+ - 0.485
152
+ - 0.456
153
+ - 0.406
154
+ std:
155
+ - 0.229
156
+ - 0.224
157
+ - 0.225
158
+ order: hwc
159
+ - ToCHWImage: null
160
+ - KeepKeys:
161
+ keep_keys:
162
+ - image
163
+ - shape
164
+ - polys
165
+ - ignore_tags
166
+ loader:
167
+ shuffle: false
168
+ drop_last: false
169
+ batch_size_per_card: 1
170
+ num_workers: 0
171
+ profiler_options: null
configs/det/PP-OCRv4/PP-OCRv4_server_det.yml ADDED
@@ -0,0 +1,175 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Global:
2
+ model_name: PP-OCRv4_server_det # To use static model for inference.
3
+ debug: false
4
+ use_gpu: true
5
+ epoch_num: &epoch_num 500
6
+ log_smooth_window: 20
7
+ print_batch_step: 100
8
+ save_model_dir: ./output/PP-OCRv4_server_det
9
+ save_epoch_step: 10
10
+ eval_batch_step:
11
+ - 0
12
+ - 1500
13
+ cal_metric_during_train: false
14
+ checkpoints:
15
+ pretrained_model: https://paddleocr.bj.bcebos.com/pretrained/PPHGNet_small_ocr_det.pdparams
16
+ save_inference_dir: null
17
+ use_visualdl: false
18
+ infer_img: doc/imgs_en/img_10.jpg
19
+ save_res_path: ./checkpoints/det_db/predicts_db.txt
20
+ d2s_train_image_shape: [3, 640, 640]
21
+ distributed: true
22
+
23
+ Architecture:
24
+ model_type: det
25
+ algorithm: DB
26
+ Transform: null
27
+ Backbone:
28
+ name: PPHGNet_small
29
+ det: True
30
+ Neck:
31
+ name: LKPAN
32
+ out_channels: 256
33
+ intracl: true
34
+ Head:
35
+ name: PFHeadLocal
36
+ k: 50
37
+ mode: "large"
38
+ fix_nan: True
39
+
40
+
41
+ Loss:
42
+ name: DBLoss
43
+ balance_loss: true
44
+ main_loss_type: DiceLoss
45
+ alpha: 5
46
+ beta: 10
47
+ ohem_ratio: 3
48
+
49
+ Optimizer:
50
+ name: Adam
51
+ beta1: 0.9
52
+ beta2: 0.999
53
+ lr:
54
+ name: Cosine
55
+ learning_rate: 0.001 #(8*8c)
56
+ warmup_epoch: 2
57
+ regularizer:
58
+ name: L2
59
+ factor: 1e-6
60
+
61
+ PostProcess:
62
+ name: DBPostProcess
63
+ thresh: 0.3
64
+ box_thresh: 0.6
65
+ max_candidates: 1000
66
+ unclip_ratio: 1.5
67
+
68
+ Metric:
69
+ name: DetMetric
70
+ main_indicator: hmean
71
+
72
+ Train:
73
+ dataset:
74
+ name: SimpleDataSet
75
+ data_dir: ./train_data/icdar2015/text_localization/
76
+ label_file_list:
77
+ - ./train_data/icdar2015/text_localization/train_icdar2015_label.txt
78
+ ratio_list: [1.0]
79
+ transforms:
80
+ - DecodeImage:
81
+ img_mode: BGR
82
+ channel_first: false
83
+ - DetLabelEncode: null
84
+ - CopyPaste: null
85
+ - IaaAugment:
86
+ augmenter_args:
87
+ - type: Fliplr
88
+ args:
89
+ p: 0.5
90
+ - type: Affine
91
+ args:
92
+ rotate:
93
+ - -10
94
+ - 10
95
+ - type: Resize
96
+ args:
97
+ size:
98
+ - 0.5
99
+ - 3
100
+ - EastRandomCropData:
101
+ size:
102
+ - 640
103
+ - 640
104
+ max_tries: 50
105
+ keep_ratio: true
106
+ - MakeBorderMap:
107
+ shrink_ratio: 0.4
108
+ thresh_min: 0.3
109
+ thresh_max: 0.7
110
+ total_epoch: *epoch_num
111
+ - MakeShrinkMap:
112
+ shrink_ratio: 0.4
113
+ min_text_size: 8
114
+ total_epoch: *epoch_num
115
+ - NormalizeImage:
116
+ scale: 1./255.
117
+ mean:
118
+ - 0.485
119
+ - 0.456
120
+ - 0.406
121
+ std:
122
+ - 0.229
123
+ - 0.224
124
+ - 0.225
125
+ order: hwc
126
+ - ToCHWImage: null
127
+ - KeepKeys:
128
+ keep_keys:
129
+ - image
130
+ - threshold_map
131
+ - threshold_mask
132
+ - shrink_map
133
+ - shrink_mask
134
+ loader:
135
+ shuffle: true
136
+ drop_last: false
137
+ batch_size_per_card: 8
138
+ num_workers: 8
139
+
140
+ Eval:
141
+ dataset:
142
+ name: SimpleDataSet
143
+ data_dir: ./train_data/icdar2015/text_localization/
144
+ label_file_list:
145
+ - ./train_data/icdar2015/text_localization/test_icdar2015_label.txt
146
+ transforms:
147
+ - DecodeImage:
148
+ img_mode: BGR
149
+ channel_first: false
150
+ - DetLabelEncode: null
151
+ - DetResizeForTest:
152
+ - NormalizeImage:
153
+ scale: 1./255.
154
+ mean:
155
+ - 0.485
156
+ - 0.456
157
+ - 0.406
158
+ std:
159
+ - 0.229
160
+ - 0.224
161
+ - 0.225
162
+ order: hwc
163
+ - ToCHWImage: null
164
+ - KeepKeys:
165
+ keep_keys:
166
+ - image
167
+ - shape
168
+ - polys
169
+ - ignore_tags
170
+ loader:
171
+ shuffle: false
172
+ drop_last: false
173
+ batch_size_per_card: 1
174
+ num_workers: 2
175
+ profiler_options: null
configs/det/PP-OCRv4/PP-OCRv4_server_seal_det.yml ADDED
@@ -0,0 +1,171 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Global:
2
+ model_name: PP-OCRv4_server_seal_det # To use static model for inference.
3
+ debug: false
4
+ use_gpu: true
5
+ epoch_num: 100
6
+ log_smooth_window: 20
7
+ print_batch_step: 10
8
+ save_model_dir: output
9
+ save_epoch_step: 1
10
+ eval_batch_step:
11
+ - 0
12
+ - 100
13
+ cal_metric_during_train: false
14
+ checkpoints:
15
+ pretrained_model: https://paddleocr.bj.bcebos.com/pretrained/PPHGNet_small_ocr_det.pdparams
16
+ save_inference_dir: null
17
+ use_visualdl: false
18
+ distributed: true
19
+ d2s_train_image_shape: [3, 640, 640]
20
+
21
+ Architecture:
22
+ model_type: det
23
+ algorithm: DB
24
+ Transform: null
25
+ Backbone:
26
+ name: PPHGNet_small
27
+ det: True
28
+ Neck:
29
+ name: LKPAN
30
+ out_channels: 256
31
+ intracl: true
32
+ Head:
33
+ name: PFHeadLocal
34
+ k: 50
35
+ mode: "large"
36
+
37
+ Loss:
38
+ name: DBLoss
39
+ balance_loss: true
40
+ main_loss_type: DiceLoss
41
+ alpha: 5
42
+ beta: 10
43
+ ohem_ratio: 3
44
+
45
+ Optimizer:
46
+ name: Adam
47
+ beta1: 0.9
48
+ beta2: 0.999
49
+ lr:
50
+ name: Cosine
51
+ learning_rate: 0.001
52
+ warmup_epoch: 2
53
+ regularizer:
54
+ name: L2
55
+ factor: 1e-6
56
+
57
+ PostProcess:
58
+ name: DBPostProcess
59
+ thresh: 0.2
60
+ box_thresh: 0.6
61
+ max_candidates: 1000
62
+ unclip_ratio: 0.5
63
+ box_type: "poly"
64
+
65
+ Metric:
66
+ name: DetMetric
67
+ main_indicator: hmean
68
+
69
+ Train:
70
+ dataset:
71
+ name: TextDetDataset
72
+ data_dir: datasets/ICDAR2015
73
+ label_file_list:
74
+ - datasets/ICDAR2015/train.txt
75
+ transforms:
76
+ - DecodeImage:
77
+ img_mode: BGR
78
+ channel_first: false
79
+ - DetLabelEncode: null
80
+ - IaaAugment:
81
+ augmenter_args:
82
+ - type: Fliplr
83
+ args:
84
+ p: 0.5
85
+ - type: Affine
86
+ args:
87
+ rotate:
88
+ - -10
89
+ - 10
90
+ - type: Resize
91
+ args:
92
+ size:
93
+ - 0.5
94
+ - 3
95
+ - EastRandomCropData:
96
+ size:
97
+ - 640
98
+ - 640
99
+ max_tries: 50
100
+ keep_ratio: true
101
+ - MakeBorderMap:
102
+ shrink_ratio: 0.8
103
+ thresh_min: 0.3
104
+ thresh_max: 0.7
105
+ total_epoch: 500
106
+ - MakeShrinkMap:
107
+ shrink_ratio: 0.8
108
+ min_text_size: 8
109
+ total_epoch: 500
110
+ - NormalizeImage:
111
+ scale: 1./255.
112
+ mean:
113
+ - 0.485
114
+ - 0.456
115
+ - 0.406
116
+ std:
117
+ - 0.229
118
+ - 0.224
119
+ - 0.225
120
+ order: hwc
121
+ - ToCHWImage: null
122
+ - KeepKeys:
123
+ keep_keys:
124
+ - image
125
+ - threshold_map
126
+ - threshold_mask
127
+ - shrink_map
128
+ - shrink_mask
129
+ loader:
130
+ shuffle: true
131
+ drop_last: false
132
+ batch_size_per_card: 4
133
+ num_workers: 3
134
+
135
+ Eval:
136
+ dataset:
137
+ name: TextDetDataset
138
+ data_dir: datasets/ICDAR2015
139
+ label_file_list:
140
+ - datasets/ICDAR2015/val.txt
141
+ transforms:
142
+ - DecodeImage:
143
+ img_mode: BGR
144
+ channel_first: false
145
+ - DetLabelEncode: null
146
+ - DetResizeForTest:
147
+ resize_long: 736
148
+ - NormalizeImage:
149
+ scale: 1./255.
150
+ mean:
151
+ - 0.485
152
+ - 0.456
153
+ - 0.406
154
+ std:
155
+ - 0.229
156
+ - 0.224
157
+ - 0.225
158
+ order: hwc
159
+ - ToCHWImage: null
160
+ - KeepKeys:
161
+ keep_keys:
162
+ - image
163
+ - shape
164
+ - polys
165
+ - ignore_tags
166
+ loader:
167
+ shuffle: false
168
+ drop_last: false
169
+ batch_size_per_card: 1
170
+ num_workers: 0
171
+ profiler_options: null
configs/det/PP-OCRv5/PP-OCRv5_mobile_det.yml ADDED
@@ -0,0 +1,174 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Global:
2
+ model_name: PP-OCRv5_mobile_det # To use static model for inference.
3
+ debug: false
4
+ use_gpu: true
5
+ epoch_num: &epoch_num 500
6
+ log_smooth_window: 20
7
+ print_batch_step: 100
8
+ save_model_dir: ./output/PP-OCRv5_mobile_det
9
+ save_epoch_step: 10
10
+ eval_batch_step:
11
+ - 0
12
+ - 1500
13
+ cal_metric_during_train: false
14
+ checkpoints:
15
+ pretrained_model: https://paddleocr.bj.bcebos.com/pretrained/PPLCNetV3_x0_75_ocr_det.pdparams
16
+ save_inference_dir: null
17
+ use_visualdl: false
18
+ infer_img: doc/imgs_en/img_10.jpg
19
+ save_res_path: ./checkpoints/det_db/predicts_db.txt
20
+ d2s_train_image_shape: [3, 640, 640]
21
+ distributed: true
22
+
23
+ Architecture:
24
+ model_type: det
25
+ algorithm: DB
26
+ Transform: null
27
+ Backbone:
28
+ name: PPLCNetV3
29
+ scale: 0.75
30
+ det: True
31
+ Neck:
32
+ name: RSEFPN
33
+ out_channels: 96
34
+ shortcut: True
35
+ Head:
36
+ name: DBHead
37
+ k: 50
38
+ fix_nan: True
39
+
40
+ Loss:
41
+ name: DBLoss
42
+ balance_loss: true
43
+ main_loss_type: DiceLoss
44
+ alpha: 5
45
+ beta: 10
46
+ ohem_ratio: 3
47
+
48
+ Optimizer:
49
+ name: Adam
50
+ beta1: 0.9
51
+ beta2: 0.999
52
+ lr:
53
+ name: Cosine
54
+ learning_rate: 0.001 #(8*8c)
55
+ warmup_epoch: 2
56
+ regularizer:
57
+ name: L2
58
+ factor: 5.0e-05
59
+
60
+ PostProcess:
61
+ name: DBPostProcess
62
+ thresh: 0.3
63
+ box_thresh: 0.6
64
+ max_candidates: 1000
65
+ unclip_ratio: 1.5
66
+
67
+ Metric:
68
+ name: DetMetric
69
+ main_indicator: hmean
70
+
71
+ Train:
72
+ dataset:
73
+ name: SimpleDataSet
74
+ data_dir: ./train_data/icdar2015/text_localization/
75
+ label_file_list:
76
+ - ./train_data/icdar2015/text_localization/train_icdar2015_label.txt
77
+ ratio_list: [1.0]
78
+ transforms:
79
+ - DecodeImage:
80
+ img_mode: BGR
81
+ channel_first: false
82
+ - DetLabelEncode: null
83
+ - CopyPaste: null
84
+ - IaaAugment:
85
+ augmenter_args:
86
+ - type: Fliplr
87
+ args:
88
+ p: 0.5
89
+ - type: Affine
90
+ args:
91
+ rotate:
92
+ - -10
93
+ - 10
94
+ - type: Resize
95
+ args:
96
+ size:
97
+ - 0.5
98
+ - 3
99
+ - EastRandomCropData:
100
+ size:
101
+ - 640
102
+ - 640
103
+ max_tries: 50
104
+ keep_ratio: true
105
+ - MakeBorderMap:
106
+ shrink_ratio: 0.4
107
+ thresh_min: 0.3
108
+ thresh_max: 0.7
109
+ total_epoch: *epoch_num
110
+ - MakeShrinkMap:
111
+ shrink_ratio: 0.4
112
+ min_text_size: 8
113
+ total_epoch: *epoch_num
114
+ - NormalizeImage:
115
+ scale: 1./255.
116
+ mean:
117
+ - 0.485
118
+ - 0.456
119
+ - 0.406
120
+ std:
121
+ - 0.229
122
+ - 0.224
123
+ - 0.225
124
+ order: hwc
125
+ - ToCHWImage: null
126
+ - KeepKeys:
127
+ keep_keys:
128
+ - image
129
+ - threshold_map
130
+ - threshold_mask
131
+ - shrink_map
132
+ - shrink_mask
133
+ loader:
134
+ shuffle: true
135
+ drop_last: false
136
+ batch_size_per_card: 8
137
+ num_workers: 8
138
+
139
+ Eval:
140
+ dataset:
141
+ name: SimpleDataSet
142
+ data_dir: ./train_data/icdar2015/text_localization/
143
+ label_file_list:
144
+ - ./train_data/icdar2015/text_localization/test_icdar2015_label.txt
145
+ transforms:
146
+ - DecodeImage:
147
+ img_mode: BGR
148
+ channel_first: false
149
+ - DetLabelEncode: null
150
+ - DetResizeForTest:
151
+ - NormalizeImage:
152
+ scale: 1./255.
153
+ mean:
154
+ - 0.485
155
+ - 0.456
156
+ - 0.406
157
+ std:
158
+ - 0.229
159
+ - 0.224
160
+ - 0.225
161
+ order: hwc
162
+ - ToCHWImage: null
163
+ - KeepKeys:
164
+ keep_keys:
165
+ - image
166
+ - shape
167
+ - polys
168
+ - ignore_tags
169
+ loader:
170
+ shuffle: false
171
+ drop_last: false
172
+ batch_size_per_card: 1
173
+ num_workers: 2
174
+ profiler_options: null
configs/det/PP-OCRv5/PP-OCRv5_server_det.yml ADDED
@@ -0,0 +1,173 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Global:
2
+ model_name: PP-OCRv5_server_det # To use static model for inference.
3
+ debug: false
4
+ use_gpu: true
5
+ epoch_num: &epoch_num 500
6
+ log_smooth_window: 20
7
+ print_batch_step: 10
8
+ save_model_dir: ./output/PP-OCRv5_server_det
9
+ save_epoch_step: 10
10
+ eval_batch_step:
11
+ - 0
12
+ - 1500
13
+ cal_metric_during_train: false
14
+ checkpoints:
15
+ pretrained_model: https://paddle-model-ecology.bj.bcebos.com/paddlex/official_pretrained_model/PPHGNetV2_B4_ocr_det.pdparams
16
+ save_inference_dir: null
17
+ use_visualdl: false
18
+ infer_img: doc/imgs_en/img_10.jpg
19
+ save_res_path: ./checkpoints/det_db/predicts_db.txt
20
+ distributed: true
21
+
22
+ Architecture:
23
+ model_type: det
24
+ algorithm: DB
25
+ Transform: null
26
+ Backbone:
27
+ name: PPHGNetV2_B4
28
+ det: True
29
+ Neck:
30
+ name: LKPAN
31
+ out_channels: 256
32
+ intracl: true
33
+ Head:
34
+ name: PFHeadLocal
35
+ k: 50
36
+ mode: "large"
37
+
38
+
39
+ Loss:
40
+ name: DBLoss
41
+ balance_loss: true
42
+ main_loss_type: DiceLoss
43
+ alpha: 5
44
+ beta: 10
45
+ ohem_ratio: 3
46
+
47
+ Optimizer:
48
+ name: Adam
49
+ beta1: 0.9
50
+ beta2: 0.999
51
+ lr:
52
+ name: Cosine
53
+ learning_rate: 0.001 #(8*8c)
54
+ warmup_epoch: 2
55
+ regularizer:
56
+ name: L2
57
+ factor: 1e-6
58
+
59
+ PostProcess:
60
+ name: DBPostProcess
61
+ thresh: 0.3
62
+ box_thresh: 0.6
63
+ max_candidates: 1000
64
+ unclip_ratio: 1.5
65
+
66
+ Metric:
67
+ name: DetMetric
68
+ main_indicator: hmean
69
+
70
+ Train:
71
+ dataset:
72
+ name: SimpleDataSet
73
+ data_dir: ./train_data/icdar2015/text_localization/
74
+ label_file_list:
75
+ - ./train_data/icdar2015/text_localization/train_icdar2015_label.txt
76
+ ratio_list: [1.0]
77
+ transforms:
78
+ - DecodeImage:
79
+ img_mode: BGR
80
+ channel_first: false
81
+ - DetLabelEncode: null
82
+ - CopyPaste: null
83
+ - IaaAugment:
84
+ augmenter_args:
85
+ - type: Fliplr
86
+ args:
87
+ p: 0.5
88
+ - type: Affine
89
+ args:
90
+ rotate:
91
+ - -10
92
+ - 10
93
+ - type: Resize
94
+ args:
95
+ size:
96
+ - 0.5
97
+ - 3
98
+ - EastRandomCropData:
99
+ size:
100
+ - 640
101
+ - 640
102
+ max_tries: 50
103
+ keep_ratio: true
104
+ - MakeBorderMap:
105
+ shrink_ratio: 0.4
106
+ thresh_min: 0.3
107
+ thresh_max: 0.7
108
+ total_epoch: *epoch_num
109
+ - MakeShrinkMap:
110
+ shrink_ratio: 0.4
111
+ min_text_size: 8
112
+ total_epoch: *epoch_num
113
+ - NormalizeImage:
114
+ scale: 1./255.
115
+ mean:
116
+ - 0.485
117
+ - 0.456
118
+ - 0.406
119
+ std:
120
+ - 0.229
121
+ - 0.224
122
+ - 0.225
123
+ order: hwc
124
+ - ToCHWImage: null
125
+ - KeepKeys:
126
+ keep_keys:
127
+ - image
128
+ - threshold_map
129
+ - threshold_mask
130
+ - shrink_map
131
+ - shrink_mask
132
+ loader:
133
+ shuffle: true
134
+ drop_last: false
135
+ batch_size_per_card: 8
136
+ num_workers: 8
137
+
138
+ Eval:
139
+ dataset:
140
+ name: SimpleDataSet
141
+ data_dir: ./train_data/icdar2015/text_localization/
142
+ label_file_list:
143
+ - ./train_data/icdar2015/text_localization/test_icdar2015_label.txt
144
+ transforms:
145
+ - DecodeImage:
146
+ img_mode: BGR
147
+ channel_first: false
148
+ - DetLabelEncode: null
149
+ - DetResizeForTest:
150
+ - NormalizeImage:
151
+ scale: 1./255.
152
+ mean:
153
+ - 0.485
154
+ - 0.456
155
+ - 0.406
156
+ std:
157
+ - 0.229
158
+ - 0.224
159
+ - 0.225
160
+ order: hwc
161
+ - ToCHWImage: null
162
+ - KeepKeys:
163
+ keep_keys:
164
+ - image
165
+ - shape
166
+ - polys
167
+ - ignore_tags
168
+ loader:
169
+ shuffle: false
170
+ drop_last: false
171
+ batch_size_per_card: 1
172
+ num_workers: 2
173
+ profiler_options: null
configs/det/ch_PP-OCRv2/ch_PP-OCRv2_det_cml.yml ADDED
@@ -0,0 +1,206 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Global:
2
+ use_gpu: true
3
+ epoch_num: 1200
4
+ log_smooth_window: 20
5
+ print_batch_step: 2
6
+ save_model_dir: ./output/ch_db_mv3/
7
+ save_epoch_step: 1200
8
+ # evaluation is run every 5000 iterations after the 4000th iteration
9
+ eval_batch_step: [3000, 2000]
10
+ cal_metric_during_train: False
11
+ pretrained_model:
12
+ checkpoints:
13
+ save_inference_dir:
14
+ use_visualdl: False
15
+ infer_img: doc/imgs_en/img_10.jpg
16
+ save_res_path: ./output/det_db/predicts_db.txt
17
+ use_amp: False
18
+ amp_level: O2
19
+ amp_dtype: bfloat16
20
+
21
+ Architecture:
22
+ name: DistillationModel
23
+ algorithm: Distillation
24
+ model_type: det
25
+ Models:
26
+ Teacher:
27
+ pretrained: ./pretrain_models/ch_ppocr_server_v2.0_det_train/best_accuracy
28
+ freeze_params: true
29
+ return_all_feats: false
30
+ model_type: det
31
+ algorithm: DB
32
+ Transform:
33
+ Backbone:
34
+ name: ResNet_vd
35
+ layers: 18
36
+ Neck:
37
+ name: DBFPN
38
+ out_channels: 256
39
+ Head:
40
+ name: DBHead
41
+ k: 50
42
+ Student:
43
+ pretrained:
44
+ freeze_params: false
45
+ return_all_feats: false
46
+ model_type: det
47
+ algorithm: DB
48
+ Backbone:
49
+ name: MobileNetV3
50
+ scale: 0.5
51
+ model_name: large
52
+ disable_se: True
53
+ Neck:
54
+ name: DBFPN
55
+ out_channels: 96
56
+ Head:
57
+ name: DBHead
58
+ k: 50
59
+ Student2:
60
+ pretrained:
61
+ freeze_params: false
62
+ return_all_feats: false
63
+ model_type: det
64
+ algorithm: DB
65
+ Transform:
66
+ Backbone:
67
+ name: MobileNetV3
68
+ scale: 0.5
69
+ model_name: large
70
+ disable_se: True
71
+ Neck:
72
+ name: DBFPN
73
+ out_channels: 96
74
+ Head:
75
+ name: DBHead
76
+ k: 50
77
+
78
+ Loss:
79
+ name: CombinedLoss
80
+ loss_config_list:
81
+ - DistillationDilaDBLoss:
82
+ weight: 1.0
83
+ model_name_pairs:
84
+ - ["Student", "Teacher"]
85
+ - ["Student2", "Teacher"]
86
+ key: maps
87
+ balance_loss: true
88
+ main_loss_type: DiceLoss
89
+ alpha: 5
90
+ beta: 10
91
+ ohem_ratio: 3
92
+ - DistillationDMLLoss:
93
+ model_name_pairs:
94
+ - ["Student", "Student2"]
95
+ maps_name: "thrink_maps"
96
+ weight: 1.0
97
+ # act: None
98
+ model_name_pairs: ["Student", "Student2"]
99
+ key: maps
100
+ - DistillationDBLoss:
101
+ weight: 1.0
102
+ model_name_list: ["Student", "Student2"]
103
+ # key: maps
104
+ # name: DBLoss
105
+ balance_loss: true
106
+ main_loss_type: DiceLoss
107
+ alpha: 5
108
+ beta: 10
109
+ ohem_ratio: 3
110
+
111
+
112
+ Optimizer:
113
+ name: Adam
114
+ beta1: 0.9
115
+ beta2: 0.999
116
+ lr:
117
+ name: Cosine
118
+ learning_rate: 0.001
119
+ warmup_epoch: 2
120
+ regularizer:
121
+ name: 'L2'
122
+ factor: 0
123
+
124
+ PostProcess:
125
+ name: DistillationDBPostProcess
126
+ model_name: ["Student", "Student2", "Teacher"]
127
+ # key: maps
128
+ thresh: 0.3
129
+ box_thresh: 0.6
130
+ max_candidates: 1000
131
+ unclip_ratio: 1.5
132
+
133
+ Metric:
134
+ name: DistillationMetric
135
+ base_metric_name: DetMetric
136
+ main_indicator: hmean
137
+ key: "Student"
138
+
139
+ Train:
140
+ dataset:
141
+ name: SimpleDataSet
142
+ data_dir: ./train_data/icdar2015/text_localization/
143
+ label_file_list:
144
+ - ./train_data/icdar2015/text_localization/train_icdar2015_label.txt
145
+ ratio_list: [1.0]
146
+ transforms:
147
+ - DecodeImage: # load image
148
+ img_mode: BGR
149
+ channel_first: False
150
+ - DetLabelEncode: # Class handling label
151
+ - CopyPaste:
152
+ - IaaAugment:
153
+ augmenter_args:
154
+ - { 'type': Fliplr, 'args': { 'p': 0.5 } }
155
+ - { 'type': Affine, 'args': { 'rotate': [-10, 10] } }
156
+ - { 'type': Resize, 'args': { 'size': [0.5, 3] } }
157
+ - EastRandomCropData:
158
+ size: [960, 960]
159
+ max_tries: 50
160
+ keep_ratio: true
161
+ - MakeBorderMap:
162
+ shrink_ratio: 0.4
163
+ thresh_min: 0.3
164
+ thresh_max: 0.7
165
+ - MakeShrinkMap:
166
+ shrink_ratio: 0.4
167
+ min_text_size: 8
168
+ - NormalizeImage:
169
+ scale: 1./255.
170
+ mean: [0.485, 0.456, 0.406]
171
+ std: [0.229, 0.224, 0.225]
172
+ order: 'hwc'
173
+ - ToCHWImage:
174
+ - KeepKeys:
175
+ keep_keys: ['image', 'threshold_map', 'threshold_mask', 'shrink_map', 'shrink_mask'] # the order of the dataloader list
176
+ loader:
177
+ shuffle: True
178
+ drop_last: False
179
+ batch_size_per_card: 8
180
+ num_workers: 4
181
+
182
+ Eval:
183
+ dataset:
184
+ name: SimpleDataSet
185
+ data_dir: ./train_data/icdar2015/text_localization/
186
+ label_file_list:
187
+ - ./train_data/icdar2015/text_localization/test_icdar2015_label.txt
188
+ transforms:
189
+ - DecodeImage: # load image
190
+ img_mode: BGR
191
+ channel_first: False
192
+ - DetLabelEncode: # Class handling label
193
+ - DetResizeForTest:
194
+ - NormalizeImage:
195
+ scale: 1./255.
196
+ mean: [0.485, 0.456, 0.406]
197
+ std: [0.229, 0.224, 0.225]
198
+ order: 'hwc'
199
+ - ToCHWImage:
200
+ - KeepKeys:
201
+ keep_keys: ['image', 'shape', 'polys', 'ignore_tags']
202
+ loader:
203
+ shuffle: False
204
+ drop_last: False
205
+ batch_size_per_card: 1 # must be 1
206
+ num_workers: 2
configs/det/ch_PP-OCRv2/ch_PP-OCRv2_det_distill.yml ADDED
@@ -0,0 +1,175 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Global:
2
+ use_gpu: true
3
+ epoch_num: 1200
4
+ log_smooth_window: 20
5
+ print_batch_step: 2
6
+ save_model_dir: ./output/ch_db_mv3/
7
+ save_epoch_step: 1200
8
+ # evaluation is run every 5000 iterations after the 4000th iteration
9
+ eval_batch_step: [3000, 2000]
10
+ cal_metric_during_train: False
11
+ pretrained_model: ./pretrain_models/MobileNetV3_large_x0_5_pretrained
12
+ checkpoints:
13
+ save_inference_dir:
14
+ use_visualdl: False
15
+ infer_img: doc/imgs_en/img_10.jpg
16
+ save_res_path: ./output/det_db/predicts_db.txt
17
+
18
+ Architecture:
19
+ name: DistillationModel
20
+ algorithm: Distillation
21
+ model_type: det
22
+ Models:
23
+ Student:
24
+ pretrained: ./pretrain_models/MobileNetV3_large_x0_5_pretrained
25
+ freeze_params: false
26
+ return_all_feats: false
27
+ model_type: det
28
+ algorithm: DB
29
+ Backbone:
30
+ name: MobileNetV3
31
+ scale: 0.5
32
+ model_name: large
33
+ disable_se: True
34
+ Neck:
35
+ name: DBFPN
36
+ out_channels: 96
37
+ Head:
38
+ name: DBHead
39
+ k: 50
40
+ Teacher:
41
+ pretrained: ./pretrain_models/ch_ppocr_server_v2.0_det_train/best_accuracy
42
+ freeze_params: true
43
+ return_all_feats: false
44
+ model_type: det
45
+ algorithm: DB
46
+ Transform:
47
+ Backbone:
48
+ name: ResNet_vd
49
+ layers: 18
50
+ Neck:
51
+ name: DBFPN
52
+ out_channels: 256
53
+ Head:
54
+ name: DBHead
55
+ k: 50
56
+
57
+ Loss:
58
+ name: CombinedLoss
59
+ loss_config_list:
60
+ - DistillationDilaDBLoss:
61
+ weight: 1.0
62
+ model_name_pairs:
63
+ - ["Student", "Teacher"]
64
+ key: maps
65
+ balance_loss: true
66
+ main_loss_type: DiceLoss
67
+ alpha: 5
68
+ beta: 10
69
+ ohem_ratio: 3
70
+ - DistillationDBLoss:
71
+ weight: 1.0
72
+ model_name_list: ["Student"]
73
+ name: DBLoss
74
+ balance_loss: true
75
+ main_loss_type: DiceLoss
76
+ alpha: 5
77
+ beta: 10
78
+ ohem_ratio: 3
79
+
80
+ Optimizer:
81
+ name: Adam
82
+ beta1: 0.9
83
+ beta2: 0.999
84
+ lr:
85
+ name: Cosine
86
+ learning_rate: 0.001
87
+ warmup_epoch: 2
88
+ regularizer:
89
+ name: 'L2'
90
+ factor: 0
91
+
92
+ PostProcess:
93
+ name: DistillationDBPostProcess
94
+ model_name: ["Student"]
95
+ key: head_out
96
+ thresh: 0.3
97
+ box_thresh: 0.6
98
+ max_candidates: 1000
99
+ unclip_ratio: 1.5
100
+
101
+ Metric:
102
+ name: DistillationMetric
103
+ base_metric_name: DetMetric
104
+ main_indicator: hmean
105
+ key: "Student"
106
+
107
+ Train:
108
+ dataset:
109
+ name: SimpleDataSet
110
+ data_dir: ./train_data/icdar2015/text_localization/
111
+ label_file_list:
112
+ - ./train_data/icdar2015/text_localization/train_icdar2015_label.txt
113
+ ratio_list: [1.0]
114
+ transforms:
115
+ - DecodeImage: # load image
116
+ img_mode: BGR
117
+ channel_first: False
118
+ - DetLabelEncode: # Class handling label
119
+ - CopyPaste:
120
+ - IaaAugment:
121
+ augmenter_args:
122
+ - { 'type': Fliplr, 'args': { 'p': 0.5 } }
123
+ - { 'type': Affine, 'args': { 'rotate': [-10, 10] } }
124
+ - { 'type': Resize, 'args': { 'size': [0.5, 3] } }
125
+ - EastRandomCropData:
126
+ size: [960, 960]
127
+ max_tries: 50
128
+ keep_ratio: true
129
+ - MakeBorderMap:
130
+ shrink_ratio: 0.4
131
+ thresh_min: 0.3
132
+ thresh_max: 0.7
133
+ - MakeShrinkMap:
134
+ shrink_ratio: 0.4
135
+ min_text_size: 8
136
+ - NormalizeImage:
137
+ scale: 1./255.
138
+ mean: [0.485, 0.456, 0.406]
139
+ std: [0.229, 0.224, 0.225]
140
+ order: 'hwc'
141
+ - ToCHWImage:
142
+ - KeepKeys:
143
+ keep_keys: ['image', 'threshold_map', 'threshold_mask', 'shrink_map', 'shrink_mask'] # the order of the dataloader list
144
+ loader:
145
+ shuffle: True
146
+ drop_last: False
147
+ batch_size_per_card: 8
148
+ num_workers: 4
149
+
150
+ Eval:
151
+ dataset:
152
+ name: SimpleDataSet
153
+ data_dir: ./train_data/icdar2015/text_localization/
154
+ label_file_list:
155
+ - ./train_data/icdar2015/text_localization/test_icdar2015_label.txt
156
+ transforms:
157
+ - DecodeImage: # load image
158
+ img_mode: BGR
159
+ channel_first: False
160
+ - DetLabelEncode: # Class handling label
161
+ - DetResizeForTest:
162
+ # image_shape: [736, 1280]
163
+ - NormalizeImage:
164
+ scale: 1./255.
165
+ mean: [0.485, 0.456, 0.406]
166
+ std: [0.229, 0.224, 0.225]
167
+ order: 'hwc'
168
+ - ToCHWImage:
169
+ - KeepKeys:
170
+ keep_keys: ['image', 'shape', 'polys', 'ignore_tags']
171
+ loader:
172
+ shuffle: False
173
+ drop_last: False
174
+ batch_size_per_card: 1 # must be 1
175
+ num_workers: 2
configs/det/ch_PP-OCRv2/ch_PP-OCRv2_det_dml.yml ADDED
@@ -0,0 +1,178 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Global:
2
+ use_gpu: true
3
+ epoch_num: 1200
4
+ log_smooth_window: 20
5
+ print_batch_step: 2
6
+ save_model_dir: ./output/ch_db_mv3/
7
+ save_epoch_step: 1200
8
+ # evaluation is run every 5000 iterations after the 4000th iteration
9
+ eval_batch_step: [3000, 2000]
10
+ cal_metric_during_train: False
11
+ pretrained_model: ./pretrain_models/MobileNetV3_large_x0_5_pretrained
12
+ checkpoints:
13
+ save_inference_dir:
14
+ use_visualdl: False
15
+ infer_img: doc/imgs_en/img_10.jpg
16
+ save_res_path: ./output/det_db/predicts_db.txt
17
+
18
+ Architecture:
19
+ name: DistillationModel
20
+ algorithm: Distillation
21
+ model_type: det
22
+ Models:
23
+ Student:
24
+ pretrained: ./pretrain_models/MobileNetV3_large_x0_5_pretrained
25
+ freeze_params: false
26
+ return_all_feats: false
27
+ model_type: det
28
+ algorithm: DB
29
+ Backbone:
30
+ name: MobileNetV3
31
+ scale: 0.5
32
+ model_name: large
33
+ disable_se: True
34
+ Neck:
35
+ name: DBFPN
36
+ out_channels: 96
37
+ Head:
38
+ name: DBHead
39
+ k: 50
40
+ Teacher:
41
+ pretrained: ./pretrain_models/MobileNetV3_large_x0_5_pretrained
42
+ freeze_params: false
43
+ return_all_feats: false
44
+ model_type: det
45
+ algorithm: DB
46
+ Transform:
47
+ Backbone:
48
+ name: MobileNetV3
49
+ scale: 0.5
50
+ model_name: large
51
+ disable_se: True
52
+ Neck:
53
+ name: DBFPN
54
+ out_channels: 96
55
+ Head:
56
+ name: DBHead
57
+ k: 50
58
+
59
+
60
+ Loss:
61
+ name: CombinedLoss
62
+ loss_config_list:
63
+ - DistillationDMLLoss:
64
+ model_name_pairs:
65
+ - ["Student", "Teacher"]
66
+ maps_name: "thrink_maps"
67
+ weight: 1.0
68
+ # act: None
69
+ model_name_pairs: ["Student", "Teacher"]
70
+ key: maps
71
+ - DistillationDBLoss:
72
+ weight: 1.0
73
+ model_name_list: ["Student", "Teacher"]
74
+ # key: maps
75
+ name: DBLoss
76
+ balance_loss: true
77
+ main_loss_type: DiceLoss
78
+ alpha: 5
79
+ beta: 10
80
+ ohem_ratio: 3
81
+
82
+
83
+ Optimizer:
84
+ name: Adam
85
+ beta1: 0.9
86
+ beta2: 0.999
87
+ lr:
88
+ name: Cosine
89
+ learning_rate: 0.001
90
+ warmup_epoch: 2
91
+ regularizer:
92
+ name: 'L2'
93
+ factor: 0
94
+
95
+ PostProcess:
96
+ name: DistillationDBPostProcess
97
+ model_name: ["Student", "Teacher"]
98
+ key: head_out
99
+ thresh: 0.3
100
+ box_thresh: 0.6
101
+ max_candidates: 1000
102
+ unclip_ratio: 1.5
103
+
104
+ Metric:
105
+ name: DistillationMetric
106
+ base_metric_name: DetMetric
107
+ main_indicator: hmean
108
+ key: "Student"
109
+
110
+ Train:
111
+ dataset:
112
+ name: SimpleDataSet
113
+ data_dir: ./train_data/icdar2015/text_localization/
114
+ label_file_list:
115
+ - ./train_data/icdar2015/text_localization/train_icdar2015_label.txt
116
+ ratio_list: [1.0]
117
+ transforms:
118
+ - DecodeImage: # load image
119
+ img_mode: BGR
120
+ channel_first: False
121
+ - DetLabelEncode: # Class handling label
122
+ - CopyPaste:
123
+ - IaaAugment:
124
+ augmenter_args:
125
+ - { 'type': Fliplr, 'args': { 'p': 0.5 } }
126
+ - { 'type': Affine, 'args': { 'rotate': [-10, 10] } }
127
+ - { 'type': Resize, 'args': { 'size': [0.5, 3] } }
128
+ - EastRandomCropData:
129
+ size: [960, 960]
130
+ max_tries: 50
131
+ keep_ratio: true
132
+ - MakeBorderMap:
133
+ shrink_ratio: 0.4
134
+ thresh_min: 0.3
135
+ thresh_max: 0.7
136
+ - MakeShrinkMap:
137
+ shrink_ratio: 0.4
138
+ min_text_size: 8
139
+ - NormalizeImage:
140
+ scale: 1./255.
141
+ mean: [0.485, 0.456, 0.406]
142
+ std: [0.229, 0.224, 0.225]
143
+ order: 'hwc'
144
+ - ToCHWImage:
145
+ - KeepKeys:
146
+ keep_keys: ['image', 'threshold_map', 'threshold_mask', 'shrink_map', 'shrink_mask'] # the order of the dataloader list
147
+ loader:
148
+ shuffle: True
149
+ drop_last: False
150
+ batch_size_per_card: 8
151
+ num_workers: 4
152
+
153
+ Eval:
154
+ dataset:
155
+ name: SimpleDataSet
156
+ data_dir: ./train_data/icdar2015/text_localization/
157
+ label_file_list:
158
+ - ./train_data/icdar2015/text_localization/test_icdar2015_label.txt
159
+ transforms:
160
+ - DecodeImage: # load image
161
+ img_mode: BGR
162
+ channel_first: False
163
+ - DetLabelEncode: # Class handling label
164
+ - DetResizeForTest:
165
+ # image_shape: [736, 1280]
166
+ - NormalizeImage:
167
+ scale: 1./255.
168
+ mean: [0.485, 0.456, 0.406]
169
+ std: [0.229, 0.224, 0.225]
170
+ order: 'hwc'
171
+ - ToCHWImage:
172
+ - KeepKeys:
173
+ keep_keys: ['image', 'shape', 'polys', 'ignore_tags']
174
+ loader:
175
+ shuffle: False
176
+ drop_last: False
177
+ batch_size_per_card: 1 # must be 1
178
+ num_workers: 2
configs/det/ch_PP-OCRv2/ch_PP-OCRv2_det_student.yml ADDED
@@ -0,0 +1,132 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Global:
2
+ use_gpu: true
3
+ epoch_num: 1200
4
+ log_smooth_window: 20
5
+ print_batch_step: 10
6
+ save_model_dir: ./output/ch_db_mv3/
7
+ save_epoch_step: 1200
8
+ # evaluation is run every 5000 iterations after the 4000th iteration
9
+ eval_batch_step: [0, 400]
10
+ cal_metric_during_train: False
11
+ pretrained_model: ./pretrain_models/student.pdparams
12
+ checkpoints:
13
+ save_inference_dir:
14
+ use_visualdl: False
15
+ infer_img: doc/imgs_en/img_10.jpg
16
+ save_res_path: ./output/det_db/predicts_db.txt
17
+
18
+ Architecture:
19
+ model_type: det
20
+ algorithm: DB
21
+ Transform:
22
+ Backbone:
23
+ name: MobileNetV3
24
+ scale: 0.5
25
+ model_name: large
26
+ disable_se: True
27
+ Neck:
28
+ name: DBFPN
29
+ out_channels: 96
30
+ Head:
31
+ name: DBHead
32
+ k: 50
33
+
34
+ Loss:
35
+ name: DBLoss
36
+ balance_loss: true
37
+ main_loss_type: DiceLoss
38
+ alpha: 5
39
+ beta: 10
40
+ ohem_ratio: 3
41
+
42
+ Optimizer:
43
+ name: Adam
44
+ beta1: 0.9
45
+ beta2: 0.999
46
+ lr:
47
+ name: Cosine
48
+ learning_rate: 0.001
49
+ warmup_epoch: 2
50
+ regularizer:
51
+ name: 'L2'
52
+ factor: 0
53
+
54
+ PostProcess:
55
+ name: DBPostProcess
56
+ thresh: 0.3
57
+ box_thresh: 0.6
58
+ max_candidates: 1000
59
+ unclip_ratio: 1.5
60
+
61
+ Metric:
62
+ name: DetMetric
63
+ main_indicator: hmean
64
+
65
+ Train:
66
+ dataset:
67
+ name: SimpleDataSet
68
+ data_dir: ./train_data/icdar2015/text_localization/
69
+ label_file_list:
70
+ - ./train_data/icdar2015/text_localization/train_icdar2015_label.txt
71
+ ratio_list: [1.0]
72
+ transforms:
73
+ - DecodeImage: # load image
74
+ img_mode: BGR
75
+ channel_first: False
76
+ - DetLabelEncode: # Class handling label
77
+ - IaaAugment:
78
+ augmenter_args:
79
+ - { 'type': Fliplr, 'args': { 'p': 0.5 } }
80
+ - { 'type': Affine, 'args': { 'rotate': [-10, 10] } }
81
+ - { 'type': Resize, 'args': { 'size': [0.5, 3] } }
82
+ - EastRandomCropData:
83
+ size: [960, 960]
84
+ max_tries: 50
85
+ keep_ratio: true
86
+ - MakeBorderMap:
87
+ shrink_ratio: 0.4
88
+ thresh_min: 0.3
89
+ thresh_max: 0.7
90
+ - MakeShrinkMap:
91
+ shrink_ratio: 0.4
92
+ min_text_size: 8
93
+ - NormalizeImage:
94
+ scale: 1./255.
95
+ mean: [0.485, 0.456, 0.406]
96
+ std: [0.229, 0.224, 0.225]
97
+ order: 'hwc'
98
+ - ToCHWImage:
99
+ - KeepKeys:
100
+ keep_keys: ['image', 'threshold_map', 'threshold_mask', 'shrink_map', 'shrink_mask'] # the order of the dataloader list
101
+ loader:
102
+ shuffle: True
103
+ drop_last: False
104
+ batch_size_per_card: 8
105
+ num_workers: 4
106
+
107
+ Eval:
108
+ dataset:
109
+ name: SimpleDataSet
110
+ data_dir: ./train_data/icdar2015/text_localization/
111
+ label_file_list:
112
+ - ./train_data/icdar2015/text_localization/test_icdar2015_label.txt
113
+ transforms:
114
+ - DecodeImage: # load image
115
+ img_mode: BGR
116
+ channel_first: False
117
+ - DetLabelEncode: # Class handling label
118
+ - DetResizeForTest:
119
+ # image_shape: [736, 1280]
120
+ - NormalizeImage:
121
+ scale: 1./255.
122
+ mean: [0.485, 0.456, 0.406]
123
+ std: [0.229, 0.224, 0.225]
124
+ order: 'hwc'
125
+ - ToCHWImage:
126
+ - KeepKeys:
127
+ keep_keys: ['image', 'shape', 'polys', 'ignore_tags']
128
+ loader:
129
+ shuffle: False
130
+ drop_last: False
131
+ batch_size_per_card: 1 # must be 1
132
+ num_workers: 2
configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml ADDED
@@ -0,0 +1,132 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Global:
2
+ use_gpu: true
3
+ epoch_num: 1200
4
+ log_smooth_window: 20
5
+ print_batch_step: 2
6
+ save_model_dir: ./output/ch_db_mv3/
7
+ save_epoch_step: 1200
8
+ # evaluation is run every 5000 iterations after the 4000th iteration
9
+ eval_batch_step: [3000, 2000]
10
+ cal_metric_during_train: False
11
+ pretrained_model: ./pretrain_models/MobileNetV3_large_x0_5_pretrained
12
+ checkpoints:
13
+ save_inference_dir:
14
+ use_visualdl: False
15
+ infer_img: doc/imgs_en/img_10.jpg
16
+ save_res_path: ./output/det_db/predicts_db.txt
17
+
18
+ Architecture:
19
+ model_type: det
20
+ algorithm: DB
21
+ Transform:
22
+ Backbone:
23
+ name: MobileNetV3
24
+ scale: 0.5
25
+ model_name: large
26
+ disable_se: True
27
+ Neck:
28
+ name: DBFPN
29
+ out_channels: 96
30
+ Head:
31
+ name: DBHead
32
+ k: 50
33
+
34
+ Loss:
35
+ name: DBLoss
36
+ balance_loss: true
37
+ main_loss_type: DiceLoss
38
+ alpha: 5
39
+ beta: 10
40
+ ohem_ratio: 3
41
+
42
+ Optimizer:
43
+ name: Adam
44
+ beta1: 0.9
45
+ beta2: 0.999
46
+ lr:
47
+ name: Cosine
48
+ learning_rate: 0.001
49
+ warmup_epoch: 2
50
+ regularizer:
51
+ name: 'L2'
52
+ factor: 0
53
+
54
+ PostProcess:
55
+ name: DBPostProcess
56
+ thresh: 0.3
57
+ box_thresh: 0.6
58
+ max_candidates: 1000
59
+ unclip_ratio: 1.5
60
+
61
+ Metric:
62
+ name: DetMetric
63
+ main_indicator: hmean
64
+
65
+ Train:
66
+ dataset:
67
+ name: SimpleDataSet
68
+ data_dir: ./train_data/icdar2015/text_localization/
69
+ label_file_list:
70
+ - ./train_data/icdar2015/text_localization/train_icdar2015_label.txt
71
+ ratio_list: [1.0]
72
+ transforms:
73
+ - DecodeImage: # load image
74
+ img_mode: BGR
75
+ channel_first: False
76
+ - DetLabelEncode: # Class handling label
77
+ - IaaAugment:
78
+ augmenter_args:
79
+ - { 'type': Fliplr, 'args': { 'p': 0.5 } }
80
+ - { 'type': Affine, 'args': { 'rotate': [-10, 10] } }
81
+ - { 'type': Resize, 'args': { 'size': [0.5, 3] } }
82
+ - EastRandomCropData:
83
+ size: [960, 960]
84
+ max_tries: 50
85
+ keep_ratio: true
86
+ - MakeBorderMap:
87
+ shrink_ratio: 0.4
88
+ thresh_min: 0.3
89
+ thresh_max: 0.7
90
+ - MakeShrinkMap:
91
+ shrink_ratio: 0.4
92
+ min_text_size: 8
93
+ - NormalizeImage:
94
+ scale: 1./255.
95
+ mean: [0.485, 0.456, 0.406]
96
+ std: [0.229, 0.224, 0.225]
97
+ order: 'hwc'
98
+ - ToCHWImage:
99
+ - KeepKeys:
100
+ keep_keys: ['image', 'threshold_map', 'threshold_mask', 'shrink_map', 'shrink_mask'] # the order of the dataloader list
101
+ loader:
102
+ shuffle: True
103
+ drop_last: False
104
+ batch_size_per_card: 8
105
+ num_workers: 4
106
+
107
+ Eval:
108
+ dataset:
109
+ name: SimpleDataSet
110
+ data_dir: ./train_data/icdar2015/text_localization/
111
+ label_file_list:
112
+ - ./train_data/icdar2015/text_localization/test_icdar2015_label.txt
113
+ transforms:
114
+ - DecodeImage: # load image
115
+ img_mode: BGR
116
+ channel_first: False
117
+ - DetLabelEncode: # Class handling label
118
+ - DetResizeForTest:
119
+ # image_shape: [736, 1280]
120
+ - NormalizeImage:
121
+ scale: 1./255.
122
+ mean: [0.485, 0.456, 0.406]
123
+ std: [0.229, 0.224, 0.225]
124
+ order: 'hwc'
125
+ - ToCHWImage:
126
+ - KeepKeys:
127
+ keep_keys: ['image', 'shape', 'polys', 'ignore_tags']
128
+ loader:
129
+ shuffle: False
130
+ drop_last: False
131
+ batch_size_per_card: 1 # must be 1
132
+ num_workers: 2
configs/det/ch_ppocr_v2.0/ch_det_res18_db_v2.0.yml ADDED
@@ -0,0 +1,131 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Global:
2
+ use_gpu: true
3
+ epoch_num: 1200
4
+ log_smooth_window: 20
5
+ print_batch_step: 2
6
+ save_model_dir: ./output/ch_db_res18/
7
+ save_epoch_step: 1200
8
+ # evaluation is run every 5000 iterations after the 4000th iteration
9
+ eval_batch_step: [3000, 2000]
10
+ cal_metric_during_train: False
11
+ pretrained_model: ./pretrain_models/ResNet18_vd_pretrained
12
+ checkpoints:
13
+ save_inference_dir:
14
+ use_visualdl: False
15
+ infer_img: doc/imgs_en/img_10.jpg
16
+ save_res_path: ./output/det_db/predicts_db.txt
17
+
18
+ Architecture:
19
+ model_type: det
20
+ algorithm: DB
21
+ Transform:
22
+ Backbone:
23
+ name: ResNet_vd
24
+ layers: 18
25
+ disable_se: True
26
+ Neck:
27
+ name: DBFPN
28
+ out_channels: 256
29
+ Head:
30
+ name: DBHead
31
+ k: 50
32
+
33
+ Loss:
34
+ name: DBLoss
35
+ balance_loss: true
36
+ main_loss_type: DiceLoss
37
+ alpha: 5
38
+ beta: 10
39
+ ohem_ratio: 3
40
+
41
+ Optimizer:
42
+ name: Adam
43
+ beta1: 0.9
44
+ beta2: 0.999
45
+ lr:
46
+ name: Cosine
47
+ learning_rate: 0.001
48
+ warmup_epoch: 2
49
+ regularizer:
50
+ name: 'L2'
51
+ factor: 0
52
+
53
+ PostProcess:
54
+ name: DBPostProcess
55
+ thresh: 0.3
56
+ box_thresh: 0.6
57
+ max_candidates: 1000
58
+ unclip_ratio: 1.5
59
+
60
+ Metric:
61
+ name: DetMetric
62
+ main_indicator: hmean
63
+
64
+ Train:
65
+ dataset:
66
+ name: SimpleDataSet
67
+ data_dir: ./train_data/icdar2015/text_localization/
68
+ label_file_list:
69
+ - ./train_data/icdar2015/text_localization/train_icdar2015_label.txt
70
+ ratio_list: [1.0]
71
+ transforms:
72
+ - DecodeImage: # load image
73
+ img_mode: BGR
74
+ channel_first: False
75
+ - DetLabelEncode: # Class handling label
76
+ - IaaAugment:
77
+ augmenter_args:
78
+ - { 'type': Fliplr, 'args': { 'p': 0.5 } }
79
+ - { 'type': Affine, 'args': { 'rotate': [-10, 10] } }
80
+ - { 'type': Resize, 'args': { 'size': [0.5, 3] } }
81
+ - EastRandomCropData:
82
+ size: [960, 960]
83
+ max_tries: 50
84
+ keep_ratio: true
85
+ - MakeBorderMap:
86
+ shrink_ratio: 0.4
87
+ thresh_min: 0.3
88
+ thresh_max: 0.7
89
+ - MakeShrinkMap:
90
+ shrink_ratio: 0.4
91
+ min_text_size: 8
92
+ - NormalizeImage:
93
+ scale: 1./255.
94
+ mean: [0.485, 0.456, 0.406]
95
+ std: [0.229, 0.224, 0.225]
96
+ order: 'hwc'
97
+ - ToCHWImage:
98
+ - KeepKeys:
99
+ keep_keys: ['image', 'threshold_map', 'threshold_mask', 'shrink_map', 'shrink_mask'] # the order of the dataloader list
100
+ loader:
101
+ shuffle: True
102
+ drop_last: False
103
+ batch_size_per_card: 8
104
+ num_workers: 4
105
+
106
+ Eval:
107
+ dataset:
108
+ name: SimpleDataSet
109
+ data_dir: ./train_data/icdar2015/text_localization/
110
+ label_file_list:
111
+ - ./train_data/icdar2015/text_localization/test_icdar2015_label.txt
112
+ transforms:
113
+ - DecodeImage: # load image
114
+ img_mode: BGR
115
+ channel_first: False
116
+ - DetLabelEncode: # Class handling label
117
+ - DetResizeForTest:
118
+ # image_shape: [736, 1280]
119
+ - NormalizeImage:
120
+ scale: 1./255.
121
+ mean: [0.485, 0.456, 0.406]
122
+ std: [0.229, 0.224, 0.225]
123
+ order: 'hwc'
124
+ - ToCHWImage:
125
+ - KeepKeys:
126
+ keep_keys: ['image', 'shape', 'polys', 'ignore_tags']
127
+ loader:
128
+ shuffle: False
129
+ drop_last: False
130
+ batch_size_per_card: 1 # must be 1
131
+ num_workers: 2
configs/det/det_mv3_db.yml ADDED
@@ -0,0 +1,133 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Global:
2
+ use_gpu: true
3
+ use_xpu: false
4
+ use_mlu: false
5
+ epoch_num: 1200
6
+ log_smooth_window: 20
7
+ print_batch_step: 10
8
+ save_model_dir: ./output/db_mv3/
9
+ save_epoch_step: 1200
10
+ # evaluation is run every 2000 iterations
11
+ eval_batch_step: [0, 2000]
12
+ cal_metric_during_train: False
13
+ pretrained_model: ./pretrain_models/MobileNetV3_large_x0_5_pretrained
14
+ checkpoints:
15
+ save_inference_dir:
16
+ use_visualdl: False
17
+ infer_img: doc/imgs_en/img_10.jpg
18
+ save_res_path: ./output/det_db/predicts_db.txt
19
+
20
+ Architecture:
21
+ model_type: det
22
+ algorithm: DB
23
+ Transform:
24
+ Backbone:
25
+ name: MobileNetV3
26
+ scale: 0.5
27
+ model_name: large
28
+ Neck:
29
+ name: DBFPN
30
+ out_channels: 256
31
+ Head:
32
+ name: DBHead
33
+ k: 50
34
+
35
+ Loss:
36
+ name: DBLoss
37
+ balance_loss: true
38
+ main_loss_type: DiceLoss
39
+ alpha: 5
40
+ beta: 10
41
+ ohem_ratio: 3
42
+
43
+ Optimizer:
44
+ name: Adam
45
+ beta1: 0.9
46
+ beta2: 0.999
47
+ lr:
48
+ learning_rate: 0.001
49
+ regularizer:
50
+ name: 'L2'
51
+ factor: 0
52
+
53
+ PostProcess:
54
+ name: DBPostProcess
55
+ thresh: 0.3
56
+ box_thresh: 0.6
57
+ max_candidates: 1000
58
+ unclip_ratio: 1.5
59
+
60
+ Metric:
61
+ name: DetMetric
62
+ main_indicator: hmean
63
+
64
+ Train:
65
+ dataset:
66
+ name: SimpleDataSet
67
+ data_dir: ./train_data/icdar2015/text_localization/
68
+ label_file_list:
69
+ - ./train_data/icdar2015/text_localization/train_icdar2015_label.txt
70
+ ratio_list: [1.0]
71
+ transforms:
72
+ - DecodeImage: # load image
73
+ img_mode: BGR
74
+ channel_first: False
75
+ - DetLabelEncode: # Class handling label
76
+ - IaaAugment:
77
+ augmenter_args:
78
+ - { 'type': Fliplr, 'args': { 'p': 0.5 } }
79
+ - { 'type': Affine, 'args': { 'rotate': [-10, 10] } }
80
+ - { 'type': Resize, 'args': { 'size': [0.5, 3] } }
81
+ - EastRandomCropData:
82
+ size: [640, 640]
83
+ max_tries: 50
84
+ keep_ratio: true
85
+ - MakeBorderMap:
86
+ shrink_ratio: 0.4
87
+ thresh_min: 0.3
88
+ thresh_max: 0.7
89
+ - MakeShrinkMap:
90
+ shrink_ratio: 0.4
91
+ min_text_size: 8
92
+ - NormalizeImage:
93
+ scale: 1./255.
94
+ mean: [0.485, 0.456, 0.406]
95
+ std: [0.229, 0.224, 0.225]
96
+ order: 'hwc'
97
+ - ToCHWImage:
98
+ - KeepKeys:
99
+ keep_keys: ['image', 'threshold_map', 'threshold_mask', 'shrink_map', 'shrink_mask'] # the order of the dataloader list
100
+ loader:
101
+ shuffle: True
102
+ drop_last: False
103
+ batch_size_per_card: 16
104
+ num_workers: 8
105
+ use_shared_memory: True
106
+
107
+ Eval:
108
+ dataset:
109
+ name: SimpleDataSet
110
+ data_dir: ./train_data/icdar2015/text_localization/
111
+ label_file_list:
112
+ - ./train_data/icdar2015/text_localization/test_icdar2015_label.txt
113
+ transforms:
114
+ - DecodeImage: # load image
115
+ img_mode: BGR
116
+ channel_first: False
117
+ - DetLabelEncode: # Class handling label
118
+ - DetResizeForTest:
119
+ image_shape: [736, 1280]
120
+ - NormalizeImage:
121
+ scale: 1./255.
122
+ mean: [0.485, 0.456, 0.406]
123
+ std: [0.229, 0.224, 0.225]
124
+ order: 'hwc'
125
+ - ToCHWImage:
126
+ - KeepKeys:
127
+ keep_keys: ['image', 'shape', 'polys', 'ignore_tags']
128
+ loader:
129
+ shuffle: False
130
+ drop_last: False
131
+ batch_size_per_card: 1 # must be 1
132
+ num_workers: 8
133
+ use_shared_memory: True
configs/det/det_mv3_east.yml ADDED
@@ -0,0 +1,109 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Global:
2
+ use_gpu: true
3
+ epoch_num: 10000
4
+ log_smooth_window: 20
5
+ print_batch_step: 2
6
+ save_model_dir: ./output/east_mv3/
7
+ save_epoch_step: 1000
8
+ # evaluation is run every 5000 iterations after the 4000th iteration
9
+ eval_batch_step: [4000, 5000]
10
+ cal_metric_during_train: False
11
+ pretrained_model: ./pretrain_models/MobileNetV3_large_x0_5_pretrained
12
+ checkpoints:
13
+ save_inference_dir:
14
+ use_visualdl: False
15
+ infer_img:
16
+ save_res_path: ./output/det_east/predicts_east.txt
17
+
18
+ Architecture:
19
+ model_type: det
20
+ algorithm: EAST
21
+ Transform:
22
+ Backbone:
23
+ name: MobileNetV3
24
+ scale: 0.5
25
+ model_name: large
26
+ Neck:
27
+ name: EASTFPN
28
+ model_name: small
29
+ Head:
30
+ name: EASTHead
31
+ model_name: small
32
+
33
+ Loss:
34
+ name: EASTLoss
35
+
36
+ Optimizer:
37
+ name: Adam
38
+ beta1: 0.9
39
+ beta2: 0.999
40
+ lr:
41
+ # name: Cosine
42
+ learning_rate: 0.001
43
+ # warmup_epoch: 0
44
+ regularizer:
45
+ name: 'L2'
46
+ factor: 0
47
+
48
+ PostProcess:
49
+ name: EASTPostProcess
50
+ score_thresh: 0.8
51
+ cover_thresh: 0.1
52
+ nms_thresh: 0.2
53
+
54
+ Metric:
55
+ name: DetMetric
56
+ main_indicator: hmean
57
+
58
+ Train:
59
+ dataset:
60
+ name: SimpleDataSet
61
+ data_dir: ./train_data/icdar2015/text_localization/
62
+ label_file_list:
63
+ - ./train_data/icdar2015/text_localization/train_icdar2015_label.txt
64
+ ratio_list: [1.0]
65
+ transforms:
66
+ - DecodeImage: # load image
67
+ img_mode: BGR
68
+ channel_first: False
69
+ - DetLabelEncode: # Class handling label
70
+ - EASTProcessTrain:
71
+ image_shape: [512, 512]
72
+ background_ratio: 0.125
73
+ min_crop_side_ratio: 0.1
74
+ min_text_size: 10
75
+ - KeepKeys:
76
+ keep_keys: ['image', 'score_map', 'geo_map', 'training_mask'] # dataloader will return list in this order
77
+ loader:
78
+ shuffle: True
79
+ drop_last: False
80
+ batch_size_per_card: 16
81
+ num_workers: 8
82
+
83
+ Eval:
84
+ dataset:
85
+ name: SimpleDataSet
86
+ data_dir: ./train_data/icdar2015/text_localization/
87
+ label_file_list:
88
+ - ./train_data/icdar2015/text_localization/test_icdar2015_label.txt
89
+ transforms:
90
+ - DecodeImage: # load image
91
+ img_mode: BGR
92
+ channel_first: False
93
+ - DetLabelEncode: # Class handling label
94
+ - DetResizeForTest:
95
+ limit_side_len: 2400
96
+ limit_type: max
97
+ - NormalizeImage:
98
+ scale: 1./255.
99
+ mean: [0.485, 0.456, 0.406]
100
+ std: [0.229, 0.224, 0.225]
101
+ order: 'hwc'
102
+ - ToCHWImage:
103
+ - KeepKeys:
104
+ keep_keys: ['image', 'shape', 'polys', 'ignore_tags']
105
+ loader:
106
+ shuffle: False
107
+ drop_last: False
108
+ batch_size_per_card: 1 # must be 1
109
+ num_workers: 2
configs/det/det_mv3_pse.yml ADDED
@@ -0,0 +1,135 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Global:
2
+ use_gpu: true
3
+ epoch_num: 600
4
+ log_smooth_window: 20
5
+ print_batch_step: 10
6
+ save_model_dir: ./output/det_mv3_pse/
7
+ save_epoch_step: 600
8
+ # evaluation is run every 63 iterations
9
+ eval_batch_step: [ 0,63 ]
10
+ cal_metric_during_train: False
11
+ pretrained_model: ./pretrain_models/MobileNetV3_large_x0_5_pretrained
12
+ checkpoints: #./output/det_r50_vd_pse_batch8_ColorJitter/best_accuracy
13
+ save_inference_dir:
14
+ use_visualdl: False
15
+ infer_img: doc/imgs_en/img_10.jpg
16
+ save_res_path: ./output/det_pse/predicts_pse.txt
17
+
18
+ Architecture:
19
+ model_type: det
20
+ algorithm: PSE
21
+ Transform: null
22
+ Backbone:
23
+ name: MobileNetV3
24
+ scale: 0.5
25
+ model_name: large
26
+ Neck:
27
+ name: FPN
28
+ out_channels: 96
29
+ Head:
30
+ name: PSEHead
31
+ hidden_dim: 96
32
+ out_channels: 7
33
+
34
+ Loss:
35
+ name: PSELoss
36
+ alpha: 0.7
37
+ ohem_ratio: 3
38
+ kernel_sample_mask: pred
39
+ reduction: none
40
+
41
+ Optimizer:
42
+ name: Adam
43
+ beta1: 0.9
44
+ beta2: 0.999
45
+ lr:
46
+ name: Step
47
+ learning_rate: 0.001
48
+ step_size: 200
49
+ gamma: 0.1
50
+ regularizer:
51
+ name: 'L2'
52
+ factor: 0.0005
53
+
54
+ PostProcess:
55
+ name: PSEPostProcess
56
+ thresh: 0
57
+ box_thresh: 0.85
58
+ min_area: 16
59
+ box_type: quad # 'quad' or 'poly'
60
+ scale: 1
61
+
62
+ Metric:
63
+ name: DetMetric
64
+ main_indicator: hmean
65
+
66
+ Train:
67
+ dataset:
68
+ name: SimpleDataSet
69
+ data_dir: ./train_data/icdar2015/text_localization/
70
+ label_file_list:
71
+ - ./train_data/icdar2015/text_localization/train_icdar2015_label.txt
72
+ ratio_list: [ 1.0 ]
73
+ transforms:
74
+ - DecodeImage: # load image
75
+ img_mode: BGR
76
+ channel_first: False
77
+ - DetLabelEncode: # Class handling label
78
+ - ColorJitter:
79
+ brightness: 0.12549019607843137
80
+ saturation: 0.5
81
+ - IaaAugment:
82
+ augmenter_args:
83
+ - { 'type': Resize, 'args': { 'size': [ 0.5, 3 ] } }
84
+ - { 'type': Fliplr, 'args': { 'p': 0.5 } }
85
+ - { 'type': Affine, 'args': { 'rotate': [ -10, 10 ] } }
86
+ - MakePseGt:
87
+ kernel_num: 7
88
+ min_shrink_ratio: 0.4
89
+ size: 640
90
+ - RandomCropImgMask:
91
+ size: [ 640,640 ]
92
+ main_key: gt_text
93
+ crop_keys: [ 'image', 'gt_text', 'gt_kernels', 'mask' ]
94
+ - NormalizeImage:
95
+ scale: 1./255.
96
+ mean: [ 0.485, 0.456, 0.406 ]
97
+ std: [ 0.229, 0.224, 0.225 ]
98
+ order: 'hwc'
99
+ - ToCHWImage:
100
+ - KeepKeys:
101
+ keep_keys: [ 'image', 'gt_text', 'gt_kernels', 'mask' ] # the order of the dataloader list
102
+ loader:
103
+ shuffle: True
104
+ drop_last: False
105
+ batch_size_per_card: 16
106
+ num_workers: 8
107
+
108
+ Eval:
109
+ dataset:
110
+ name: SimpleDataSet
111
+ data_dir: ./train_data/icdar2015/text_localization/
112
+ label_file_list:
113
+ - ./train_data/icdar2015/text_localization/test_icdar2015_label.txt
114
+ ratio_list: [ 1.0 ]
115
+ transforms:
116
+ - DecodeImage: # load image
117
+ img_mode: BGR
118
+ channel_first: False
119
+ - DetLabelEncode: # Class handling label
120
+ - DetResizeForTest:
121
+ limit_side_len: 736
122
+ limit_type: min
123
+ - NormalizeImage:
124
+ scale: 1./255.
125
+ mean: [ 0.485, 0.456, 0.406 ]
126
+ std: [ 0.229, 0.224, 0.225 ]
127
+ order: 'hwc'
128
+ - ToCHWImage:
129
+ - KeepKeys:
130
+ keep_keys: [ 'image', 'shape', 'polys', 'ignore_tags' ]
131
+ loader:
132
+ shuffle: False
133
+ drop_last: False
134
+ batch_size_per_card: 1 # must be 1
135
+ num_workers: 8
configs/det/det_r18_vd_ct.yml ADDED
@@ -0,0 +1,107 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Global:
2
+ use_gpu: true
3
+ epoch_num: 600
4
+ log_smooth_window: 20
5
+ print_batch_step: 10
6
+ save_model_dir: ./output/det_ct/
7
+ save_epoch_step: 10
8
+ # evaluation is run every 2000 iterations
9
+ eval_batch_step: [0,1000]
10
+ cal_metric_during_train: False
11
+ pretrained_model: ./pretrain_models/ResNet18_vd_pretrained.pdparams
12
+ checkpoints:
13
+ save_inference_dir:
14
+ use_visualdl: False
15
+ infer_img: doc/imgs_en/img623.jpg
16
+ save_res_path: ./output/det_ct/predicts_ct.txt
17
+
18
+ Architecture:
19
+ model_type: det
20
+ algorithm: CT
21
+ Transform:
22
+ Backbone:
23
+ name: ResNet_vd
24
+ layers: 18
25
+ Neck:
26
+ name: CTFPN
27
+ Head:
28
+ name: CT_Head
29
+ in_channels: 512
30
+ hidden_dim: 128
31
+ num_classes: 3
32
+
33
+ Loss:
34
+ name: CTLoss
35
+
36
+ Optimizer:
37
+ name: Adam
38
+ lr: #PolynomialDecay
39
+ name: Linear
40
+ learning_rate: 0.001
41
+ end_lr: 0.
42
+ epochs: 600
43
+ step_each_epoch: 1254
44
+ power: 0.9
45
+
46
+ PostProcess:
47
+ name: CTPostProcess
48
+ box_type: poly
49
+
50
+ Metric:
51
+ name: CTMetric
52
+ main_indicator: f_score
53
+
54
+ Train:
55
+ dataset:
56
+ name: SimpleDataSet
57
+ data_dir: ./train_data/total_text/train
58
+ label_file_list:
59
+ - ./train_data/total_text/train/train.txt
60
+ ratio_list: [1.0]
61
+ transforms:
62
+ - DecodeImage:
63
+ img_mode: RGB
64
+ channel_first: False
65
+ - CTLabelEncode: # Class handling label
66
+ - RandomScale:
67
+ - MakeShrink:
68
+ - GroupRandomHorizontalFlip:
69
+ - GroupRandomRotate:
70
+ - GroupRandomCropPadding:
71
+ - MakeCentripetalShift:
72
+ - ColorJitter:
73
+ brightness: 0.125
74
+ saturation: 0.5
75
+ - ToCHWImage:
76
+ - NormalizeImage:
77
+ - KeepKeys:
78
+ keep_keys: ['image', 'gt_kernel', 'training_mask', 'gt_instance', 'gt_kernel_instance', 'training_mask_distance', 'gt_distance'] # the order of the dataloader list
79
+ loader:
80
+ shuffle: True
81
+ drop_last: True
82
+ batch_size_per_card: 4
83
+ num_workers: 8
84
+
85
+ Eval:
86
+ dataset:
87
+ name: SimpleDataSet
88
+ data_dir: ./train_data/total_text/test
89
+ label_file_list:
90
+ - ./train_data/total_text/test/test.txt
91
+ ratio_list: [1.0]
92
+ transforms:
93
+ - DecodeImage:
94
+ img_mode: RGB
95
+ channel_first: False
96
+ - CTLabelEncode: # Class handling label
97
+ - ScaleAlignedShort:
98
+ - NormalizeImage:
99
+ order: 'hwc'
100
+ - ToCHWImage:
101
+ - KeepKeys:
102
+ keep_keys: ['image', 'shape', 'polys', 'texts'] # the order of the dataloader list
103
+ loader:
104
+ shuffle: False
105
+ drop_last: False
106
+ batch_size_per_card: 1
107
+ num_workers: 2
configs/det/det_r50_db++_icdar15.yml ADDED
@@ -0,0 +1,164 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Global:
2
+ debug: false
3
+ use_gpu: true
4
+ epoch_num: 1000
5
+ log_smooth_window: 20
6
+ print_batch_step: 10
7
+ save_model_dir: ./output/det_r50_icdar15/
8
+ save_epoch_step: 200
9
+ eval_batch_step:
10
+ - 0
11
+ - 2000
12
+ cal_metric_during_train: false
13
+ pretrained_model: ./pretrain_models/ResNet50_dcn_asf_synthtext_pretrained
14
+ checkpoints: null
15
+ save_inference_dir: null
16
+ use_visualdl: false
17
+ infer_img: doc/imgs_en/img_10.jpg
18
+ save_res_path: ./checkpoints/det_db/predicts_db.txt
19
+ Architecture:
20
+ model_type: det
21
+ algorithm: DB++
22
+ Transform: null
23
+ Backbone:
24
+ name: ResNet
25
+ layers: 50
26
+ dcn_stage: [False, True, True, True]
27
+ Neck:
28
+ name: DBFPN
29
+ out_channels: 256
30
+ use_asf: True
31
+ Head:
32
+ name: DBHead
33
+ k: 50
34
+ Loss:
35
+ name: DBLoss
36
+ balance_loss: true
37
+ main_loss_type: BCELoss
38
+ alpha: 5
39
+ beta: 10
40
+ ohem_ratio: 3
41
+ Optimizer:
42
+ name: Momentum
43
+ momentum: 0.9
44
+ lr:
45
+ name: DecayLearningRate
46
+ learning_rate: 0.007
47
+ epochs: 1000
48
+ factor: 0.9
49
+ end_lr: 0
50
+ weight_decay: 0.0001
51
+ PostProcess:
52
+ name: DBPostProcess
53
+ thresh: 0.3
54
+ box_thresh: 0.6
55
+ max_candidates: 1000
56
+ unclip_ratio: 1.5
57
+ det_box_type: 'quad' # 'quad' or 'poly'
58
+ Metric:
59
+ name: DetMetric
60
+ main_indicator: hmean
61
+ Train:
62
+ dataset:
63
+ name: SimpleDataSet
64
+ data_dir: ./train_data/icdar2015/text_localization/
65
+ label_file_list:
66
+ - ./train_data/icdar2015/text_localization/train_icdar2015_label.txt
67
+ ratio_list:
68
+ - 1.0
69
+ transforms:
70
+ - DecodeImage:
71
+ img_mode: BGR
72
+ channel_first: false
73
+ - DetLabelEncode: null
74
+ - IaaAugment:
75
+ augmenter_args:
76
+ - type: Fliplr
77
+ args:
78
+ p: 0.5
79
+ - type: Affine
80
+ args:
81
+ rotate:
82
+ - -10
83
+ - 10
84
+ - type: Resize
85
+ args:
86
+ size:
87
+ - 0.5
88
+ - 3
89
+ - EastRandomCropData:
90
+ size:
91
+ - 640
92
+ - 640
93
+ max_tries: 10
94
+ keep_ratio: true
95
+ - MakeShrinkMap:
96
+ shrink_ratio: 0.4
97
+ min_text_size: 8
98
+ - MakeBorderMap:
99
+ shrink_ratio: 0.4
100
+ thresh_min: 0.3
101
+ thresh_max: 0.7
102
+ - NormalizeImage:
103
+ scale: 1./255.
104
+ mean:
105
+ - 0.48109378172549
106
+ - 0.45752457890196
107
+ - 0.40787054090196
108
+ std:
109
+ - 1.0
110
+ - 1.0
111
+ - 1.0
112
+ order: hwc
113
+ - ToCHWImage: null
114
+ - KeepKeys:
115
+ keep_keys:
116
+ - image
117
+ - threshold_map
118
+ - threshold_mask
119
+ - shrink_map
120
+ - shrink_mask
121
+ loader:
122
+ shuffle: true
123
+ drop_last: false
124
+ batch_size_per_card: 4
125
+ num_workers: 8
126
+ Eval:
127
+ dataset:
128
+ name: SimpleDataSet
129
+ data_dir: ./train_data/icdar2015/text_localization
130
+ label_file_list:
131
+ - ./train_data/icdar2015/text_localization/test_icdar2015_label.txt
132
+ transforms:
133
+ - DecodeImage:
134
+ img_mode: BGR
135
+ channel_first: false
136
+ - DetLabelEncode: null
137
+ - DetResizeForTest:
138
+ image_shape:
139
+ - 1152
140
+ - 2048
141
+ - NormalizeImage:
142
+ scale: 1./255.
143
+ mean:
144
+ - 0.48109378172549
145
+ - 0.45752457890196
146
+ - 0.40787054090196
147
+ std:
148
+ - 1.0
149
+ - 1.0
150
+ - 1.0
151
+ order: hwc
152
+ - ToCHWImage: null
153
+ - KeepKeys:
154
+ keep_keys:
155
+ - image
156
+ - shape
157
+ - polys
158
+ - ignore_tags
159
+ loader:
160
+ shuffle: false
161
+ drop_last: false
162
+ batch_size_per_card: 1
163
+ num_workers: 2
164
+ profiler_options: null
configs/det/det_r50_db++_td_tr.yml ADDED
@@ -0,0 +1,167 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Global:
2
+ debug: false
3
+ use_gpu: true
4
+ epoch_num: 1000
5
+ log_smooth_window: 20
6
+ print_batch_step: 10
7
+ save_model_dir: ./output/det_r50_td_tr/
8
+ save_epoch_step: 200
9
+ eval_batch_step:
10
+ - 0
11
+ - 2000
12
+ cal_metric_during_train: false
13
+ pretrained_model: ./pretrain_models/ResNet50_dcn_asf_synthtext_pretrained
14
+ checkpoints: null
15
+ save_inference_dir: null
16
+ use_visualdl: false
17
+ infer_img: doc/imgs_en/img_10.jpg
18
+ save_res_path: ./checkpoints/det_db/predicts_db.txt
19
+ Architecture:
20
+ model_type: det
21
+ algorithm: DB++
22
+ Transform: null
23
+ Backbone:
24
+ name: ResNet
25
+ layers: 50
26
+ dcn_stage: [False, True, True, True]
27
+ Neck:
28
+ name: DBFPN
29
+ out_channels: 256
30
+ use_asf: True
31
+ Head:
32
+ name: DBHead
33
+ k: 50
34
+ Loss:
35
+ name: DBLoss
36
+ balance_loss: true
37
+ main_loss_type: BCELoss
38
+ alpha: 5
39
+ beta: 10
40
+ ohem_ratio: 3
41
+ Optimizer:
42
+ name: Momentum
43
+ momentum: 0.9
44
+ lr:
45
+ name: DecayLearningRate
46
+ learning_rate: 0.007
47
+ epochs: 1000
48
+ factor: 0.9
49
+ end_lr: 0
50
+ weight_decay: 0.0001
51
+ PostProcess:
52
+ name: DBPostProcess
53
+ thresh: 0.3
54
+ box_thresh: 0.5
55
+ max_candidates: 1000
56
+ unclip_ratio: 1.5
57
+ det_box_type: 'quad' # 'quad' or 'poly'
58
+ Metric:
59
+ name: DetMetric
60
+ main_indicator: hmean
61
+ Train:
62
+ dataset:
63
+ name: SimpleDataSet
64
+ data_dir: ./train_data/
65
+ label_file_list:
66
+ - ./train_data/TD_TR/TD500/train_gt_labels.txt
67
+ - ./train_data/TD_TR/TR400/gt_labels.txt
68
+ ratio_list:
69
+ - 1.0
70
+ - 1.0
71
+ transforms:
72
+ - DecodeImage:
73
+ img_mode: BGR
74
+ channel_first: false
75
+ - DetLabelEncode: null
76
+ - IaaAugment:
77
+ augmenter_args:
78
+ - type: Fliplr
79
+ args:
80
+ p: 0.5
81
+ - type: Affine
82
+ args:
83
+ rotate:
84
+ - -10
85
+ - 10
86
+ - type: Resize
87
+ args:
88
+ size:
89
+ - 0.5
90
+ - 3
91
+ - EastRandomCropData:
92
+ size:
93
+ - 640
94
+ - 640
95
+ max_tries: 10
96
+ keep_ratio: true
97
+ - MakeShrinkMap:
98
+ shrink_ratio: 0.4
99
+ min_text_size: 8
100
+ - MakeBorderMap:
101
+ shrink_ratio: 0.4
102
+ thresh_min: 0.3
103
+ thresh_max: 0.7
104
+ - NormalizeImage:
105
+ scale: 1./255.
106
+ mean:
107
+ - 0.48109378172549
108
+ - 0.45752457890196
109
+ - 0.40787054090196
110
+ std:
111
+ - 1.0
112
+ - 1.0
113
+ - 1.0
114
+ order: hwc
115
+ - ToCHWImage: null
116
+ - KeepKeys:
117
+ keep_keys:
118
+ - image
119
+ - threshold_map
120
+ - threshold_mask
121
+ - shrink_map
122
+ - shrink_mask
123
+ loader:
124
+ shuffle: true
125
+ drop_last: false
126
+ batch_size_per_card: 4
127
+ num_workers: 8
128
+ Eval:
129
+ dataset:
130
+ name: SimpleDataSet
131
+ data_dir: ./train_data/
132
+ label_file_list:
133
+ - ./train_data/TD_TR/TD500/test_gt_labels.txt
134
+ transforms:
135
+ - DecodeImage:
136
+ img_mode: BGR
137
+ channel_first: false
138
+ - DetLabelEncode: null
139
+ - DetResizeForTest:
140
+ image_shape:
141
+ - 736
142
+ - 736
143
+ keep_ratio: True
144
+ - NormalizeImage:
145
+ scale: 1./255.
146
+ mean:
147
+ - 0.48109378172549
148
+ - 0.45752457890196
149
+ - 0.40787054090196
150
+ std:
151
+ - 1.0
152
+ - 1.0
153
+ - 1.0
154
+ order: hwc
155
+ - ToCHWImage: null
156
+ - KeepKeys:
157
+ keep_keys:
158
+ - image
159
+ - shape
160
+ - polys
161
+ - ignore_tags
162
+ loader:
163
+ shuffle: false
164
+ drop_last: false
165
+ batch_size_per_card: 1
166
+ num_workers: 2
167
+ profiler_options: null
configs/det/det_r50_drrg_ctw.yml ADDED
@@ -0,0 +1,133 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Global:
2
+ use_gpu: true
3
+ epoch_num: 1200
4
+ log_smooth_window: 20
5
+ print_batch_step: 5
6
+ save_model_dir: ./output/det_r50_drrg_ctw/
7
+ save_epoch_step: 100
8
+ # evaluation is run every 1260 iterations
9
+ eval_batch_step: [37800, 1260]
10
+ cal_metric_during_train: False
11
+ pretrained_model: ./pretrain_models/ResNet50_vd_ssld_pretrained.pdparams
12
+ checkpoints:
13
+ save_inference_dir:
14
+ use_visualdl: False
15
+ infer_img: doc/imgs_en/img_10.jpg
16
+ save_res_path: ./output/det_drrg/predicts_drrg.txt
17
+
18
+
19
+ Architecture:
20
+ model_type: det
21
+ algorithm: DRRG
22
+ Transform:
23
+ Backbone:
24
+ name: ResNet_vd
25
+ layers: 50
26
+ Neck:
27
+ name: FPN_UNet
28
+ in_channels: [256, 512, 1024, 2048]
29
+ out_channels: 32
30
+ Head:
31
+ name: DRRGHead
32
+ in_channels: 32
33
+ text_region_thr: 0.3
34
+ center_region_thr: 0.4
35
+ Loss:
36
+ name: DRRGLoss
37
+
38
+ Optimizer:
39
+ name: Momentum
40
+ momentum: 0.9
41
+ lr:
42
+ name: DecayLearningRate
43
+ learning_rate: 0.028
44
+ epochs: 1200
45
+ factor: 0.9
46
+ end_lr: 0.0000001
47
+ weight_decay: 0.0001
48
+
49
+ PostProcess:
50
+ name: DRRGPostprocess
51
+ link_thr: 0.8
52
+
53
+ Metric:
54
+ name: DetFCEMetric
55
+ main_indicator: hmean
56
+
57
+ Train:
58
+ dataset:
59
+ name: SimpleDataSet
60
+ data_dir: ./train_data/ctw1500/imgs/
61
+ label_file_list:
62
+ - ./train_data/ctw1500/imgs/training.txt
63
+ transforms:
64
+ - DecodeImage: # load image
65
+ img_mode: BGR
66
+ channel_first: False
67
+ ignore_orientation: True
68
+ - DetLabelEncode: # Class handling label
69
+ - ColorJitter:
70
+ brightness: 0.12549019607843137
71
+ saturation: 0.5
72
+ - RandomScaling:
73
+ - RandomCropFlip:
74
+ crop_ratio: 0.5
75
+ - RandomCropPolyInstances:
76
+ crop_ratio: 0.8
77
+ min_side_ratio: 0.3
78
+ - RandomRotatePolyInstances:
79
+ rotate_ratio: 0.5
80
+ max_angle: 60
81
+ pad_with_fixed_color: False
82
+ - SquareResizePad:
83
+ target_size: 800
84
+ pad_ratio: 0.6
85
+ - IaaAugment:
86
+ augmenter_args:
87
+ - { 'type': Fliplr, 'args': { 'p': 0.5 } }
88
+ - DRRGTargets:
89
+ - NormalizeImage:
90
+ scale: 1./255.
91
+ mean: [0.485, 0.456, 0.406]
92
+ std: [0.229, 0.224, 0.225]
93
+ order: 'hwc'
94
+ - ToCHWImage:
95
+ - KeepKeys:
96
+ keep_keys: ['image', 'gt_text_mask', 'gt_center_region_mask', 'gt_mask',
97
+ 'gt_top_height_map', 'gt_bot_height_map', 'gt_sin_map',
98
+ 'gt_cos_map', 'gt_comp_attribs'] # dataloader will return list in this order
99
+ loader:
100
+ shuffle: True
101
+ drop_last: False
102
+ batch_size_per_card: 4
103
+ num_workers: 8
104
+
105
+ Eval:
106
+ dataset:
107
+ name: SimpleDataSet
108
+ data_dir: ./train_data/ctw1500/imgs/
109
+ label_file_list:
110
+ - ./train_data/ctw1500/imgs/test.txt
111
+ transforms:
112
+ - DecodeImage: # load image
113
+ img_mode: BGR
114
+ channel_first: False
115
+ ignore_orientation: True
116
+ - DetLabelEncode: # Class handling label
117
+ - DetResizeForTest:
118
+ limit_type: 'min'
119
+ limit_side_len: 640
120
+ - NormalizeImage:
121
+ scale: 1./255.
122
+ mean: [0.485, 0.456, 0.406]
123
+ std: [0.229, 0.224, 0.225]
124
+ order: 'hwc'
125
+ - Pad:
126
+ - ToCHWImage:
127
+ - KeepKeys:
128
+ keep_keys: ['image', 'shape', 'polys', 'ignore_tags']
129
+ loader:
130
+ shuffle: False
131
+ drop_last: False
132
+ batch_size_per_card: 1 # must be 1
133
+ num_workers: 2
configs/det/det_r50_vd_db.yml ADDED
@@ -0,0 +1,128 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Global:
2
+ use_gpu: true
3
+ epoch_num: 1200
4
+ log_smooth_window: 20
5
+ print_batch_step: 10
6
+ save_model_dir: ./output/det_r50_vd/
7
+ save_epoch_step: 1200
8
+ # evaluation is run every 2000 iterations
9
+ eval_batch_step: [0,2000]
10
+ cal_metric_during_train: False
11
+ pretrained_model: ./pretrain_models/ResNet50_vd_ssld_pretrained
12
+ checkpoints:
13
+ save_inference_dir:
14
+ use_visualdl: False
15
+ infer_img: doc/imgs_en/img_10.jpg
16
+ save_res_path: ./output/det_db/predicts_db.txt
17
+
18
+ Architecture:
19
+ model_type: det
20
+ algorithm: DB
21
+ Transform:
22
+ Backbone:
23
+ name: ResNet_vd
24
+ layers: 50
25
+ Neck:
26
+ name: DBFPN
27
+ out_channels: 256
28
+ Head:
29
+ name: DBHead
30
+ k: 50
31
+
32
+ Loss:
33
+ name: DBLoss
34
+ balance_loss: true
35
+ main_loss_type: DiceLoss
36
+ alpha: 5
37
+ beta: 10
38
+ ohem_ratio: 3
39
+
40
+ Optimizer:
41
+ name: Adam
42
+ beta1: 0.9
43
+ beta2: 0.999
44
+ lr:
45
+ learning_rate: 0.001
46
+ regularizer:
47
+ name: 'L2'
48
+ factor: 0
49
+
50
+ PostProcess:
51
+ name: DBPostProcess
52
+ thresh: 0.3
53
+ box_thresh: 0.7
54
+ max_candidates: 1000
55
+ unclip_ratio: 1.5
56
+
57
+ Metric:
58
+ name: DetMetric
59
+ main_indicator: hmean
60
+
61
+ Train:
62
+ dataset:
63
+ name: SimpleDataSet
64
+ data_dir: ./train_data/icdar2015/text_localization/
65
+ label_file_list:
66
+ - ./train_data/icdar2015/text_localization/train_icdar2015_label.txt
67
+ ratio_list: [1.0]
68
+ transforms:
69
+ - DecodeImage: # load image
70
+ img_mode: BGR
71
+ channel_first: False
72
+ - DetLabelEncode: # Class handling label
73
+ - IaaAugment:
74
+ augmenter_args:
75
+ - { 'type': Fliplr, 'args': { 'p': 0.5 } }
76
+ - { 'type': Affine, 'args': { 'rotate': [-10, 10] } }
77
+ - { 'type': Resize, 'args': { 'size': [0.5, 3] } }
78
+ - EastRandomCropData:
79
+ size: [640, 640]
80
+ max_tries: 50
81
+ keep_ratio: true
82
+ - MakeBorderMap:
83
+ shrink_ratio: 0.4
84
+ thresh_min: 0.3
85
+ thresh_max: 0.7
86
+ - MakeShrinkMap:
87
+ shrink_ratio: 0.4
88
+ min_text_size: 8
89
+ - NormalizeImage:
90
+ scale: 1./255.
91
+ mean: [0.485, 0.456, 0.406]
92
+ std: [0.229, 0.224, 0.225]
93
+ order: 'hwc'
94
+ - ToCHWImage:
95
+ - KeepKeys:
96
+ keep_keys: ['image', 'threshold_map', 'threshold_mask', 'shrink_map', 'shrink_mask'] # the order of the dataloader list
97
+ loader:
98
+ shuffle: True
99
+ drop_last: False
100
+ batch_size_per_card: 16
101
+ num_workers: 4
102
+
103
+ Eval:
104
+ dataset:
105
+ name: SimpleDataSet
106
+ data_dir: ./train_data/icdar2015/text_localization/
107
+ label_file_list:
108
+ - ./train_data/icdar2015/text_localization/test_icdar2015_label.txt
109
+ transforms:
110
+ - DecodeImage: # load image
111
+ img_mode: BGR
112
+ channel_first: False
113
+ - DetLabelEncode: # Class handling label
114
+ - DetResizeForTest:
115
+ image_shape: [736, 1280]
116
+ - NormalizeImage:
117
+ scale: 1./255.
118
+ mean: [0.485, 0.456, 0.406]
119
+ std: [0.229, 0.224, 0.225]
120
+ order: 'hwc'
121
+ - ToCHWImage:
122
+ - KeepKeys:
123
+ keep_keys: ['image', 'shape', 'polys', 'ignore_tags']
124
+ loader:
125
+ shuffle: False
126
+ drop_last: False
127
+ batch_size_per_card: 1 # must be 1
128
+ num_workers: 8