WensongSong committed on
Commit
26ff915
·
1 Parent(s): abd08dc
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. pytorch3d-0.7.8/.circleci/build_count.py +33 -0
  2. pytorch3d-0.7.8/.circleci/check.sh +13 -0
  3. pytorch3d-0.7.8/.circleci/config.in.yml +171 -0
  4. pytorch3d-0.7.8/.circleci/config.yml +688 -0
  5. pytorch3d-0.7.8/.circleci/regenerate.py +183 -0
  6. pytorch3d-0.7.8/.clang-format +85 -0
  7. pytorch3d-0.7.8/.flake8 +9 -0
  8. pytorch3d-0.7.8/.github/CODE_OF_CONDUCT.md +76 -0
  9. pytorch3d-0.7.8/.github/CONTRIBUTING.md +53 -0
  10. pytorch3d-0.7.8/.github/ISSUE_TEMPLATE/bugs.md +30 -0
  11. pytorch3d-0.7.8/.github/ISSUE_TEMPLATE/config.yml +1 -0
  12. pytorch3d-0.7.8/.github/ISSUE_TEMPLATE/feature_request.md +21 -0
  13. pytorch3d-0.7.8/.github/ISSUE_TEMPLATE/questions-help.md +21 -0
  14. pytorch3d-0.7.8/.gitignore +21 -0
  15. pytorch3d-0.7.8/INSTALL.md +157 -0
  16. pytorch3d-0.7.8/LICENSE +30 -0
  17. pytorch3d-0.7.8/LICENSE-3RD-PARTY +71 -0
  18. pytorch3d-0.7.8/README.md +183 -0
  19. pytorch3d-0.7.8/dev/linter.sh +40 -0
  20. pytorch3d-0.7.8/dev/run_tutorials.sh +56 -0
  21. pytorch3d-0.7.8/dev/test_list.py +65 -0
  22. pytorch3d-0.7.8/docs/.gitignore +7 -0
  23. pytorch3d-0.7.8/docs/.readthedocs.yaml +27 -0
  24. pytorch3d-0.7.8/docs/Makefile +25 -0
  25. pytorch3d-0.7.8/docs/README.md +77 -0
  26. pytorch3d-0.7.8/docs/conf.py +198 -0
  27. pytorch3d-0.7.8/docs/examples/pulsar_basic.py +76 -0
  28. pytorch3d-0.7.8/docs/examples/pulsar_basic_unified.py +89 -0
  29. pytorch3d-0.7.8/docs/examples/pulsar_cam.py +180 -0
  30. pytorch3d-0.7.8/docs/examples/pulsar_cam_unified.py +230 -0
  31. pytorch3d-0.7.8/docs/examples/pulsar_multiview.py +230 -0
  32. pytorch3d-0.7.8/docs/examples/pulsar_optimization.py +164 -0
  33. pytorch3d-0.7.8/docs/examples/pulsar_optimization_unified.py +189 -0
  34. pytorch3d-0.7.8/docs/generate_stubs.py +162 -0
  35. pytorch3d-0.7.8/docs/index.rst +17 -0
  36. pytorch3d-0.7.8/docs/modules/common.rst +6 -0
  37. pytorch3d-0.7.8/docs/modules/datasets.rst +9 -0
  38. pytorch3d-0.7.8/docs/modules/implicitron/data_basics.rst +29 -0
  39. pytorch3d-0.7.8/docs/modules/implicitron/datasets.rst +29 -0
  40. pytorch3d-0.7.8/docs/modules/implicitron/evaluation.rst +14 -0
  41. pytorch3d-0.7.8/docs/modules/implicitron/index.rst +10 -0
  42. pytorch3d-0.7.8/docs/modules/implicitron/models/base_model.rst +9 -0
  43. pytorch3d-0.7.8/docs/modules/implicitron/models/feature_extractor/feature_extractor.rst +9 -0
  44. pytorch3d-0.7.8/docs/modules/implicitron/models/feature_extractor/index.rst +7 -0
  45. pytorch3d-0.7.8/docs/modules/implicitron/models/feature_extractor/resnet_feature_extractor.rst +9 -0
  46. pytorch3d-0.7.8/docs/modules/implicitron/models/generic_model.rst +9 -0
  47. pytorch3d-0.7.8/docs/modules/implicitron/models/global_encoder/autodecoder.rst +9 -0
  48. pytorch3d-0.7.8/docs/modules/implicitron/models/global_encoder/global_encoder.rst +9 -0
  49. pytorch3d-0.7.8/docs/modules/implicitron/models/global_encoder/index.rst +7 -0
  50. pytorch3d-0.7.8/docs/modules/implicitron/models/implicit_function/base.rst +9 -0
pytorch3d-0.7.8/.circleci/build_count.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

"""
Print the number of nightly builds
"""

from collections import Counter

import yaml


# Load the generated CircleCI config (expected in the current directory) and
# pull out the job list of the build_and_test workflow. A context manager is
# used so the file handle is closed deterministically instead of leaking
# until garbage collection.
with open("config.yml") as f:
    conf = yaml.safe_load(f)
jobs = conf["workflows"]["build_and_test"]["jobs"]
21
def jobtype(job):
    """Return a printable name for one workflow job entry.

    A job entry in the CircleCI config is either a plain string (the job
    name) or a single-key mapping of job name -> parameters. Anything else
    is labelled "MULTIPLE PARTS".
    """
    if isinstance(job, str):
        return job
    if len(job) != 1:
        return "MULTIPLE PARTS"
    # Iterating a one-element mapping yields its single key.
    (name,) = job
    return name
28
+
29
+
30
# Summarise the workflow: print how many jobs there are of each type,
# then a blank line, then the total job count.
tally = Counter(jobtype(job) for job in jobs)
for job_name, count in tally.items():
    print(job_name, count)
print()
print(len(jobs))
pytorch3d-0.7.8/.circleci/check.sh ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash -e
2
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
3
+ # All rights reserved.
4
+ #
5
+ # This source code is licensed under the BSD-style license found in the
6
+ # LICENSE file in the root directory of this source tree.
7
+
8
+ # Run this script before committing config.yml to verify it is valid yaml.
9
+
10
+ python -c 'import yaml; yaml.safe_load(open("config.yml"))' && echo OK - valid yaml
11
+
12
+ msg="circleci not installed so can't check schema"
13
+ command -v circleci > /dev/null && (cd ..; circleci config validate) || echo "$msg"
pytorch3d-0.7.8/.circleci/config.in.yml ADDED
@@ -0,0 +1,171 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ version: 2.1
2
+
3
+ #examples:
4
+ #https://github.com/facebookresearch/ParlAI/blob/master/.circleci/config.yml
5
+ #https://github.com/facebookresearch/hydra/blob/master/.circleci/config.yml
6
+ #https://github.com/facebookresearch/habitat-api/blob/master/.circleci/config.yml
7
+
8
+ #drive tests with nox or tox or pytest?
9
+
10
+ # -------------------------------------------------------------------------------------
11
+ # environments where we run our jobs
12
+ # -------------------------------------------------------------------------------------
13
+
14
+
15
+ setupcuda: &setupcuda
16
+ run:
17
+ name: Setup CUDA
18
+ working_directory: ~/
19
+ command: |
20
+ # download and install nvidia drivers, cuda, etc
21
+ wget --no-verbose --no-clobber -P ~/nvidia-downloads https://developer.download.nvidia.com/compute/cuda/11.3.1/local_installers/cuda_11.3.1_465.19.01_linux.run
22
+ sudo sh ~/nvidia-downloads/cuda_11.3.1_465.19.01_linux.run --silent
23
+ echo "Done installing CUDA."
24
+ pyenv versions
25
+ nvidia-smi
26
+ pyenv global 3.9.1
27
+
28
+ binary_common: &binary_common
29
+ parameters:
30
+ # Edit these defaults to do a release
31
+ build_version:
32
+ description: "version number of release binary; by default, build a nightly"
33
+ type: string
34
+ default: ""
35
+ pytorch_version:
36
+ description: "PyTorch version to build against; by default, use a nightly"
37
+ type: string
38
+ default: ""
39
+ # Don't edit these
40
+ python_version:
41
+ description: "Python version to build against (e.g., 3.7)"
42
+ type: string
43
+ cu_version:
44
+ description: "CUDA version to build against, in CU format (e.g., cpu or cu100)"
45
+ type: string
46
+ wheel_docker_image:
47
+ description: "Wheel only: what docker image to use"
48
+ type: string
49
+ default: "pytorch/manylinux-cuda101"
50
+ conda_docker_image:
51
+ description: "what docker image to use for docker"
52
+ type: string
53
+ default: "pytorch/conda-cuda"
54
+ environment:
55
+ PYTHON_VERSION: << parameters.python_version >>
56
+ BUILD_VERSION: << parameters.build_version >>
57
+ PYTORCH_VERSION: << parameters.pytorch_version >>
58
+ CU_VERSION: << parameters.cu_version >>
59
+ TESTRUN_DOCKER_IMAGE: << parameters.conda_docker_image >>
60
+
61
+ jobs:
62
+ main:
63
+ environment:
64
+ CUDA_VERSION: "11.3"
65
+ resource_class: gpu.nvidia.small.multi
66
+ machine:
67
+ image: linux-cuda-11:default
68
+ steps:
69
+ - checkout
70
+ - <<: *setupcuda
71
+ - run: pip3 install --progress-bar off imageio wheel matplotlib 'pillow<7'
72
+ - run: pip3 install --progress-bar off torch==1.10.0+cu113 torchvision==0.11.1+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html
73
+ # - run: conda create -p ~/conda_env python=3.7 numpy
74
+ # - run: conda activate ~/conda_env
75
+ # - run: conda install -c pytorch pytorch torchvision
76
+
77
+ - run: pip3 install --progress-bar off 'git+https://github.com/facebookresearch/fvcore'
78
+ - run: pip3 install --progress-bar off 'git+https://github.com/facebookresearch/iopath'
79
+ - run:
80
+ name: build
81
+ command: |
82
+ export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda-11.3/lib64
83
+ python3 setup.py build_ext --inplace
84
+ - run: LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda-11.3/lib64 python -m unittest discover -v -s tests -t .
85
+ - run: python3 setup.py bdist_wheel
86
+
87
+ binary_linux_wheel:
88
+ <<: *binary_common
89
+ docker:
90
+ - image: << parameters.wheel_docker_image >>
91
+ auth:
92
+ username: $DOCKERHUB_USERNAME
93
+ password: $DOCKERHUB_TOKEN
94
+ resource_class: 2xlarge+
95
+ steps:
96
+ - checkout
97
+ - run: MAX_JOBS=15 packaging/build_wheel.sh
98
+ - store_artifacts:
99
+ path: dist
100
+ - persist_to_workspace:
101
+ root: dist
102
+ paths:
103
+ - "*"
104
+
105
+ binary_linux_conda:
106
+ <<: *binary_common
107
+ docker:
108
+ - image: "<< parameters.conda_docker_image >>"
109
+ auth:
110
+ username: $DOCKERHUB_USERNAME
111
+ password: $DOCKERHUB_TOKEN
112
+ resource_class: 2xlarge+
113
+ steps:
114
+ - checkout
115
+ # This is building with cuda but no gpu present,
116
+ # so we aren't running the tests.
117
+ - run:
118
+ name: build
119
+ no_output_timeout: 40m
120
+ command: MAX_JOBS=15 TEST_FLAG=--no-test python3 packaging/build_conda.py
121
+ - store_artifacts:
122
+ path: /opt/conda/conda-bld/linux-64
123
+ - persist_to_workspace:
124
+ root: /opt/conda/conda-bld/linux-64
125
+ paths:
126
+ - "*"
127
+
128
+ binary_linux_conda_cuda:
129
+ <<: *binary_common
130
+ machine:
131
+ image: linux-cuda-11:default
132
+ resource_class: gpu.nvidia.small.multi
133
+ steps:
134
+ - checkout
135
+
136
+ - run:
137
+ name: Pull docker image
138
+ command: |
139
+ nvidia-smi
140
+ set -e
141
+
142
+ { docker login -u="$DOCKERHUB_USERNAME" -p="$DOCKERHUB_TOKEN" ; } 2> /dev/null
143
+
144
+ echo Pulling docker image $TESTRUN_DOCKER_IMAGE
145
+ docker pull $TESTRUN_DOCKER_IMAGE
146
+ - run:
147
+ name: Build and run tests
148
+ no_output_timeout: 40m
149
+ command: |
150
+ set -e
151
+
152
+ cd ${HOME}/project/
153
+
154
+ export JUST_TESTRUN=1
155
+ VARS_TO_PASS="-e PYTHON_VERSION -e BUILD_VERSION -e PYTORCH_VERSION -e CU_VERSION -e JUST_TESTRUN"
156
+
157
+ docker run --gpus all --ipc=host -v $(pwd):/remote -w /remote ${VARS_TO_PASS} ${TESTRUN_DOCKER_IMAGE} python3 ./packaging/build_conda.py
158
+
159
+ workflows:
160
+ version: 2
161
+ build_and_test:
162
+ jobs:
163
+ # - main:
164
+ # context: DOCKERHUB_TOKEN
165
+ {{workflows()}}
166
+ - binary_linux_conda_cuda:
167
+ name: testrun_conda_cuda_py310_cu117_pyt201
168
+ context: DOCKERHUB_TOKEN
169
+ python_version: "3.10"
170
+ pytorch_version: '2.0.1'
171
+ cu_version: "cu117"
pytorch3d-0.7.8/.circleci/config.yml ADDED
@@ -0,0 +1,688 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ version: 2.1
2
+
3
+ #examples:
4
+ #https://github.com/facebookresearch/ParlAI/blob/master/.circleci/config.yml
5
+ #https://github.com/facebookresearch/hydra/blob/master/.circleci/config.yml
6
+ #https://github.com/facebookresearch/habitat-api/blob/master/.circleci/config.yml
7
+
8
+ #drive tests with nox or tox or pytest?
9
+
10
+ # -------------------------------------------------------------------------------------
11
+ # environments where we run our jobs
12
+ # -------------------------------------------------------------------------------------
13
+
14
+
15
+ setupcuda: &setupcuda
16
+ run:
17
+ name: Setup CUDA
18
+ working_directory: ~/
19
+ command: |
20
+ # download and install nvidia drivers, cuda, etc
21
+ wget --no-verbose --no-clobber -P ~/nvidia-downloads https://developer.download.nvidia.com/compute/cuda/11.3.1/local_installers/cuda_11.3.1_465.19.01_linux.run
22
+ sudo sh ~/nvidia-downloads/cuda_11.3.1_465.19.01_linux.run --silent
23
+ echo "Done installing CUDA."
24
+ pyenv versions
25
+ nvidia-smi
26
+ pyenv global 3.9.1
27
+
28
+ binary_common: &binary_common
29
+ parameters:
30
+ # Edit these defaults to do a release`
31
+ build_version:
32
+ description: "version number of release binary; by default, build a nightly"
33
+ type: string
34
+ default: ""
35
+ pytorch_version:
36
+ description: "PyTorch version to build against; by default, use a nightly"
37
+ type: string
38
+ default: ""
39
+ # Don't edit these
40
+ python_version:
41
+ description: "Python version to build against (e.g., 3.7)"
42
+ type: string
43
+ cu_version:
44
+ description: "CUDA version to build against, in CU format (e.g., cpu or cu100)"
45
+ type: string
46
+ wheel_docker_image:
47
+ description: "Wheel only: what docker image to use"
48
+ type: string
49
+ default: "pytorch/manylinux-cuda101"
50
+ conda_docker_image:
51
+ description: "what docker image to use for docker"
52
+ type: string
53
+ default: "pytorch/conda-cuda"
54
+ environment:
55
+ PYTHON_VERSION: << parameters.python_version >>
56
+ BUILD_VERSION: << parameters.build_version >>
57
+ PYTORCH_VERSION: << parameters.pytorch_version >>
58
+ CU_VERSION: << parameters.cu_version >>
59
+ TESTRUN_DOCKER_IMAGE: << parameters.conda_docker_image >>
60
+
61
+ jobs:
62
+ main:
63
+ environment:
64
+ CUDA_VERSION: "11.3"
65
+ resource_class: gpu.nvidia.small.multi
66
+ machine:
67
+ image: linux-cuda-11:default
68
+ steps:
69
+ - checkout
70
+ - <<: *setupcuda
71
+ - run: pip3 install --progress-bar off imageio wheel matplotlib 'pillow<7'
72
+ - run: pip3 install --progress-bar off torch==1.10.0+cu113 torchvision==0.11.1+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html
73
+ # - run: conda create -p ~/conda_env python=3.7 numpy
74
+ # - run: conda activate ~/conda_env
75
+ # - run: conda install -c pytorch pytorch torchvision
76
+
77
+ - run: pip3 install --progress-bar off 'git+https://github.com/facebookresearch/fvcore'
78
+ - run: pip3 install --progress-bar off 'git+https://github.com/facebookresearch/iopath'
79
+ - run:
80
+ name: build
81
+ command: |
82
+ export LD_LIBRARY_PATH=$LD_LIBARY_PATH:/usr/local/cuda-11.3/lib64
83
+ python3 setup.py build_ext --inplace
84
+ - run: LD_LIBRARY_PATH=$LD_LIBARY_PATH:/usr/local/cuda-11.3/lib64 python -m unittest discover -v -s tests -t .
85
+ - run: python3 setup.py bdist_wheel
86
+
87
+ binary_linux_wheel:
88
+ <<: *binary_common
89
+ docker:
90
+ - image: << parameters.wheel_docker_image >>
91
+ auth:
92
+ username: $DOCKERHUB_USERNAME
93
+ password: $DOCKERHUB_TOKEN
94
+ resource_class: 2xlarge+
95
+ steps:
96
+ - checkout
97
+ - run: MAX_JOBS=15 packaging/build_wheel.sh
98
+ - store_artifacts:
99
+ path: dist
100
+ - persist_to_workspace:
101
+ root: dist
102
+ paths:
103
+ - "*"
104
+
105
+ binary_linux_conda:
106
+ <<: *binary_common
107
+ docker:
108
+ - image: "<< parameters.conda_docker_image >>"
109
+ auth:
110
+ username: $DOCKERHUB_USERNAME
111
+ password: $DOCKERHUB_TOKEN
112
+ resource_class: 2xlarge+
113
+ steps:
114
+ - checkout
115
+ # This is building with cuda but no gpu present,
116
+ # so we aren't running the tests.
117
+ - run:
118
+ name: build
119
+ no_output_timeout: 40m
120
+ command: MAX_JOBS=15 TEST_FLAG=--no-test python3 packaging/build_conda.py
121
+ - store_artifacts:
122
+ path: /opt/conda/conda-bld/linux-64
123
+ - persist_to_workspace:
124
+ root: /opt/conda/conda-bld/linux-64
125
+ paths:
126
+ - "*"
127
+
128
+ binary_linux_conda_cuda:
129
+ <<: *binary_common
130
+ machine:
131
+ image: linux-cuda-11:default
132
+ resource_class: gpu.nvidia.small.multi
133
+ steps:
134
+ - checkout
135
+
136
+ - run:
137
+ name: Pull docker image
138
+ command: |
139
+ nvidia-smi
140
+ set -e
141
+
142
+ { docker login -u="$DOCKERHUB_USERNAME" -p="$DOCKERHUB_TOKEN" ; } 2> /dev/null
143
+
144
+ echo Pulling docker image $TESTRUN_DOCKER_IMAGE
145
+ docker pull $TESTRUN_DOCKER_IMAGE
146
+ - run:
147
+ name: Build and run tests
148
+ no_output_timeout: 40m
149
+ command: |
150
+ set -e
151
+
152
+ cd ${HOME}/project/
153
+
154
+ export JUST_TESTRUN=1
155
+ VARS_TO_PASS="-e PYTHON_VERSION -e BUILD_VERSION -e PYTORCH_VERSION -e CU_VERSION -e JUST_TESTRUN"
156
+
157
+ docker run --gpus all --ipc=host -v $(pwd):/remote -w /remote ${VARS_TO_PASS} ${TESTRUN_DOCKER_IMAGE} python3 ./packaging/build_conda.py
158
+
159
+ workflows:
160
+ version: 2
161
+ build_and_test:
162
+ jobs:
163
+ # - main:
164
+ # context: DOCKERHUB_TOKEN
165
+ - binary_linux_conda:
166
+ conda_docker_image: pytorch/conda-builder:cuda118
167
+ context: DOCKERHUB_TOKEN
168
+ cu_version: cu118
169
+ name: linux_conda_py38_cu118_pyt210
170
+ python_version: '3.8'
171
+ pytorch_version: 2.1.0
172
+ - binary_linux_conda:
173
+ conda_docker_image: pytorch/conda-builder:cuda121
174
+ context: DOCKERHUB_TOKEN
175
+ cu_version: cu121
176
+ name: linux_conda_py38_cu121_pyt210
177
+ python_version: '3.8'
178
+ pytorch_version: 2.1.0
179
+ - binary_linux_conda:
180
+ conda_docker_image: pytorch/conda-builder:cuda118
181
+ context: DOCKERHUB_TOKEN
182
+ cu_version: cu118
183
+ name: linux_conda_py38_cu118_pyt211
184
+ python_version: '3.8'
185
+ pytorch_version: 2.1.1
186
+ - binary_linux_conda:
187
+ conda_docker_image: pytorch/conda-builder:cuda121
188
+ context: DOCKERHUB_TOKEN
189
+ cu_version: cu121
190
+ name: linux_conda_py38_cu121_pyt211
191
+ python_version: '3.8'
192
+ pytorch_version: 2.1.1
193
+ - binary_linux_conda:
194
+ conda_docker_image: pytorch/conda-builder:cuda118
195
+ context: DOCKERHUB_TOKEN
196
+ cu_version: cu118
197
+ name: linux_conda_py38_cu118_pyt212
198
+ python_version: '3.8'
199
+ pytorch_version: 2.1.2
200
+ - binary_linux_conda:
201
+ conda_docker_image: pytorch/conda-builder:cuda121
202
+ context: DOCKERHUB_TOKEN
203
+ cu_version: cu121
204
+ name: linux_conda_py38_cu121_pyt212
205
+ python_version: '3.8'
206
+ pytorch_version: 2.1.2
207
+ - binary_linux_conda:
208
+ conda_docker_image: pytorch/conda-builder:cuda118
209
+ context: DOCKERHUB_TOKEN
210
+ cu_version: cu118
211
+ name: linux_conda_py38_cu118_pyt220
212
+ python_version: '3.8'
213
+ pytorch_version: 2.2.0
214
+ - binary_linux_conda:
215
+ conda_docker_image: pytorch/conda-builder:cuda121
216
+ context: DOCKERHUB_TOKEN
217
+ cu_version: cu121
218
+ name: linux_conda_py38_cu121_pyt220
219
+ python_version: '3.8'
220
+ pytorch_version: 2.2.0
221
+ - binary_linux_conda:
222
+ conda_docker_image: pytorch/conda-builder:cuda118
223
+ context: DOCKERHUB_TOKEN
224
+ cu_version: cu118
225
+ name: linux_conda_py38_cu118_pyt222
226
+ python_version: '3.8'
227
+ pytorch_version: 2.2.2
228
+ - binary_linux_conda:
229
+ conda_docker_image: pytorch/conda-builder:cuda121
230
+ context: DOCKERHUB_TOKEN
231
+ cu_version: cu121
232
+ name: linux_conda_py38_cu121_pyt222
233
+ python_version: '3.8'
234
+ pytorch_version: 2.2.2
235
+ - binary_linux_conda:
236
+ conda_docker_image: pytorch/conda-builder:cuda118
237
+ context: DOCKERHUB_TOKEN
238
+ cu_version: cu118
239
+ name: linux_conda_py38_cu118_pyt231
240
+ python_version: '3.8'
241
+ pytorch_version: 2.3.1
242
+ - binary_linux_conda:
243
+ conda_docker_image: pytorch/conda-builder:cuda121
244
+ context: DOCKERHUB_TOKEN
245
+ cu_version: cu121
246
+ name: linux_conda_py38_cu121_pyt231
247
+ python_version: '3.8'
248
+ pytorch_version: 2.3.1
249
+ - binary_linux_conda:
250
+ conda_docker_image: pytorch/conda-builder:cuda118
251
+ context: DOCKERHUB_TOKEN
252
+ cu_version: cu118
253
+ name: linux_conda_py38_cu118_pyt240
254
+ python_version: '3.8'
255
+ pytorch_version: 2.4.0
256
+ - binary_linux_conda:
257
+ conda_docker_image: pytorch/conda-builder:cuda121
258
+ context: DOCKERHUB_TOKEN
259
+ cu_version: cu121
260
+ name: linux_conda_py38_cu121_pyt240
261
+ python_version: '3.8'
262
+ pytorch_version: 2.4.0
263
+ - binary_linux_conda:
264
+ conda_docker_image: pytorch/conda-builder:cuda118
265
+ context: DOCKERHUB_TOKEN
266
+ cu_version: cu118
267
+ name: linux_conda_py38_cu118_pyt241
268
+ python_version: '3.8'
269
+ pytorch_version: 2.4.1
270
+ - binary_linux_conda:
271
+ conda_docker_image: pytorch/conda-builder:cuda121
272
+ context: DOCKERHUB_TOKEN
273
+ cu_version: cu121
274
+ name: linux_conda_py38_cu121_pyt241
275
+ python_version: '3.8'
276
+ pytorch_version: 2.4.1
277
+ - binary_linux_conda:
278
+ conda_docker_image: pytorch/conda-builder:cuda118
279
+ context: DOCKERHUB_TOKEN
280
+ cu_version: cu118
281
+ name: linux_conda_py39_cu118_pyt210
282
+ python_version: '3.9'
283
+ pytorch_version: 2.1.0
284
+ - binary_linux_conda:
285
+ conda_docker_image: pytorch/conda-builder:cuda121
286
+ context: DOCKERHUB_TOKEN
287
+ cu_version: cu121
288
+ name: linux_conda_py39_cu121_pyt210
289
+ python_version: '3.9'
290
+ pytorch_version: 2.1.0
291
+ - binary_linux_conda:
292
+ conda_docker_image: pytorch/conda-builder:cuda118
293
+ context: DOCKERHUB_TOKEN
294
+ cu_version: cu118
295
+ name: linux_conda_py39_cu118_pyt211
296
+ python_version: '3.9'
297
+ pytorch_version: 2.1.1
298
+ - binary_linux_conda:
299
+ conda_docker_image: pytorch/conda-builder:cuda121
300
+ context: DOCKERHUB_TOKEN
301
+ cu_version: cu121
302
+ name: linux_conda_py39_cu121_pyt211
303
+ python_version: '3.9'
304
+ pytorch_version: 2.1.1
305
+ - binary_linux_conda:
306
+ conda_docker_image: pytorch/conda-builder:cuda118
307
+ context: DOCKERHUB_TOKEN
308
+ cu_version: cu118
309
+ name: linux_conda_py39_cu118_pyt212
310
+ python_version: '3.9'
311
+ pytorch_version: 2.1.2
312
+ - binary_linux_conda:
313
+ conda_docker_image: pytorch/conda-builder:cuda121
314
+ context: DOCKERHUB_TOKEN
315
+ cu_version: cu121
316
+ name: linux_conda_py39_cu121_pyt212
317
+ python_version: '3.9'
318
+ pytorch_version: 2.1.2
319
+ - binary_linux_conda:
320
+ conda_docker_image: pytorch/conda-builder:cuda118
321
+ context: DOCKERHUB_TOKEN
322
+ cu_version: cu118
323
+ name: linux_conda_py39_cu118_pyt220
324
+ python_version: '3.9'
325
+ pytorch_version: 2.2.0
326
+ - binary_linux_conda:
327
+ conda_docker_image: pytorch/conda-builder:cuda121
328
+ context: DOCKERHUB_TOKEN
329
+ cu_version: cu121
330
+ name: linux_conda_py39_cu121_pyt220
331
+ python_version: '3.9'
332
+ pytorch_version: 2.2.0
333
+ - binary_linux_conda:
334
+ conda_docker_image: pytorch/conda-builder:cuda118
335
+ context: DOCKERHUB_TOKEN
336
+ cu_version: cu118
337
+ name: linux_conda_py39_cu118_pyt222
338
+ python_version: '3.9'
339
+ pytorch_version: 2.2.2
340
+ - binary_linux_conda:
341
+ conda_docker_image: pytorch/conda-builder:cuda121
342
+ context: DOCKERHUB_TOKEN
343
+ cu_version: cu121
344
+ name: linux_conda_py39_cu121_pyt222
345
+ python_version: '3.9'
346
+ pytorch_version: 2.2.2
347
+ - binary_linux_conda:
348
+ conda_docker_image: pytorch/conda-builder:cuda118
349
+ context: DOCKERHUB_TOKEN
350
+ cu_version: cu118
351
+ name: linux_conda_py39_cu118_pyt231
352
+ python_version: '3.9'
353
+ pytorch_version: 2.3.1
354
+ - binary_linux_conda:
355
+ conda_docker_image: pytorch/conda-builder:cuda121
356
+ context: DOCKERHUB_TOKEN
357
+ cu_version: cu121
358
+ name: linux_conda_py39_cu121_pyt231
359
+ python_version: '3.9'
360
+ pytorch_version: 2.3.1
361
+ - binary_linux_conda:
362
+ conda_docker_image: pytorch/conda-builder:cuda118
363
+ context: DOCKERHUB_TOKEN
364
+ cu_version: cu118
365
+ name: linux_conda_py39_cu118_pyt240
366
+ python_version: '3.9'
367
+ pytorch_version: 2.4.0
368
+ - binary_linux_conda:
369
+ conda_docker_image: pytorch/conda-builder:cuda121
370
+ context: DOCKERHUB_TOKEN
371
+ cu_version: cu121
372
+ name: linux_conda_py39_cu121_pyt240
373
+ python_version: '3.9'
374
+ pytorch_version: 2.4.0
375
+ - binary_linux_conda:
376
+ conda_docker_image: pytorch/conda-builder:cuda118
377
+ context: DOCKERHUB_TOKEN
378
+ cu_version: cu118
379
+ name: linux_conda_py39_cu118_pyt241
380
+ python_version: '3.9'
381
+ pytorch_version: 2.4.1
382
+ - binary_linux_conda:
383
+ conda_docker_image: pytorch/conda-builder:cuda121
384
+ context: DOCKERHUB_TOKEN
385
+ cu_version: cu121
386
+ name: linux_conda_py39_cu121_pyt241
387
+ python_version: '3.9'
388
+ pytorch_version: 2.4.1
389
+ - binary_linux_conda:
390
+ conda_docker_image: pytorch/conda-builder:cuda118
391
+ context: DOCKERHUB_TOKEN
392
+ cu_version: cu118
393
+ name: linux_conda_py310_cu118_pyt210
394
+ python_version: '3.10'
395
+ pytorch_version: 2.1.0
396
+ - binary_linux_conda:
397
+ conda_docker_image: pytorch/conda-builder:cuda121
398
+ context: DOCKERHUB_TOKEN
399
+ cu_version: cu121
400
+ name: linux_conda_py310_cu121_pyt210
401
+ python_version: '3.10'
402
+ pytorch_version: 2.1.0
403
+ - binary_linux_conda:
404
+ conda_docker_image: pytorch/conda-builder:cuda118
405
+ context: DOCKERHUB_TOKEN
406
+ cu_version: cu118
407
+ name: linux_conda_py310_cu118_pyt211
408
+ python_version: '3.10'
409
+ pytorch_version: 2.1.1
410
+ - binary_linux_conda:
411
+ conda_docker_image: pytorch/conda-builder:cuda121
412
+ context: DOCKERHUB_TOKEN
413
+ cu_version: cu121
414
+ name: linux_conda_py310_cu121_pyt211
415
+ python_version: '3.10'
416
+ pytorch_version: 2.1.1
417
+ - binary_linux_conda:
418
+ conda_docker_image: pytorch/conda-builder:cuda118
419
+ context: DOCKERHUB_TOKEN
420
+ cu_version: cu118
421
+ name: linux_conda_py310_cu118_pyt212
422
+ python_version: '3.10'
423
+ pytorch_version: 2.1.2
424
+ - binary_linux_conda:
425
+ conda_docker_image: pytorch/conda-builder:cuda121
426
+ context: DOCKERHUB_TOKEN
427
+ cu_version: cu121
428
+ name: linux_conda_py310_cu121_pyt212
429
+ python_version: '3.10'
430
+ pytorch_version: 2.1.2
431
+ - binary_linux_conda:
432
+ conda_docker_image: pytorch/conda-builder:cuda118
433
+ context: DOCKERHUB_TOKEN
434
+ cu_version: cu118
435
+ name: linux_conda_py310_cu118_pyt220
436
+ python_version: '3.10'
437
+ pytorch_version: 2.2.0
438
+ - binary_linux_conda:
439
+ conda_docker_image: pytorch/conda-builder:cuda121
440
+ context: DOCKERHUB_TOKEN
441
+ cu_version: cu121
442
+ name: linux_conda_py310_cu121_pyt220
443
+ python_version: '3.10'
444
+ pytorch_version: 2.2.0
445
+ - binary_linux_conda:
446
+ conda_docker_image: pytorch/conda-builder:cuda118
447
+ context: DOCKERHUB_TOKEN
448
+ cu_version: cu118
449
+ name: linux_conda_py310_cu118_pyt222
450
+ python_version: '3.10'
451
+ pytorch_version: 2.2.2
452
+ - binary_linux_conda:
453
+ conda_docker_image: pytorch/conda-builder:cuda121
454
+ context: DOCKERHUB_TOKEN
455
+ cu_version: cu121
456
+ name: linux_conda_py310_cu121_pyt222
457
+ python_version: '3.10'
458
+ pytorch_version: 2.2.2
459
+ - binary_linux_conda:
460
+ conda_docker_image: pytorch/conda-builder:cuda118
461
+ context: DOCKERHUB_TOKEN
462
+ cu_version: cu118
463
+ name: linux_conda_py310_cu118_pyt231
464
+ python_version: '3.10'
465
+ pytorch_version: 2.3.1
466
+ - binary_linux_conda:
467
+ conda_docker_image: pytorch/conda-builder:cuda121
468
+ context: DOCKERHUB_TOKEN
469
+ cu_version: cu121
470
+ name: linux_conda_py310_cu121_pyt231
471
+ python_version: '3.10'
472
+ pytorch_version: 2.3.1
473
+ - binary_linux_conda:
474
+ conda_docker_image: pytorch/conda-builder:cuda118
475
+ context: DOCKERHUB_TOKEN
476
+ cu_version: cu118
477
+ name: linux_conda_py310_cu118_pyt240
478
+ python_version: '3.10'
479
+ pytorch_version: 2.4.0
480
+ - binary_linux_conda:
481
+ conda_docker_image: pytorch/conda-builder:cuda121
482
+ context: DOCKERHUB_TOKEN
483
+ cu_version: cu121
484
+ name: linux_conda_py310_cu121_pyt240
485
+ python_version: '3.10'
486
+ pytorch_version: 2.4.0
487
+ - binary_linux_conda:
488
+ conda_docker_image: pytorch/conda-builder:cuda118
489
+ context: DOCKERHUB_TOKEN
490
+ cu_version: cu118
491
+ name: linux_conda_py310_cu118_pyt241
492
+ python_version: '3.10'
493
+ pytorch_version: 2.4.1
494
+ - binary_linux_conda:
495
+ conda_docker_image: pytorch/conda-builder:cuda121
496
+ context: DOCKERHUB_TOKEN
497
+ cu_version: cu121
498
+ name: linux_conda_py310_cu121_pyt241
499
+ python_version: '3.10'
500
+ pytorch_version: 2.4.1
501
+ - binary_linux_conda:
502
+ conda_docker_image: pytorch/conda-builder:cuda118
503
+ context: DOCKERHUB_TOKEN
504
+ cu_version: cu118
505
+ name: linux_conda_py311_cu118_pyt210
506
+ python_version: '3.11'
507
+ pytorch_version: 2.1.0
508
+ - binary_linux_conda:
509
+ conda_docker_image: pytorch/conda-builder:cuda121
510
+ context: DOCKERHUB_TOKEN
511
+ cu_version: cu121
512
+ name: linux_conda_py311_cu121_pyt210
513
+ python_version: '3.11'
514
+ pytorch_version: 2.1.0
515
+ - binary_linux_conda:
516
+ conda_docker_image: pytorch/conda-builder:cuda118
517
+ context: DOCKERHUB_TOKEN
518
+ cu_version: cu118
519
+ name: linux_conda_py311_cu118_pyt211
520
+ python_version: '3.11'
521
+ pytorch_version: 2.1.1
522
+ - binary_linux_conda:
523
+ conda_docker_image: pytorch/conda-builder:cuda121
524
+ context: DOCKERHUB_TOKEN
525
+ cu_version: cu121
526
+ name: linux_conda_py311_cu121_pyt211
527
+ python_version: '3.11'
528
+ pytorch_version: 2.1.1
529
+ - binary_linux_conda:
530
+ conda_docker_image: pytorch/conda-builder:cuda118
531
+ context: DOCKERHUB_TOKEN
532
+ cu_version: cu118
533
+ name: linux_conda_py311_cu118_pyt212
534
+ python_version: '3.11'
535
+ pytorch_version: 2.1.2
536
+ - binary_linux_conda:
537
+ conda_docker_image: pytorch/conda-builder:cuda121
538
+ context: DOCKERHUB_TOKEN
539
+ cu_version: cu121
540
+ name: linux_conda_py311_cu121_pyt212
541
+ python_version: '3.11'
542
+ pytorch_version: 2.1.2
543
+ - binary_linux_conda:
544
+ conda_docker_image: pytorch/conda-builder:cuda118
545
+ context: DOCKERHUB_TOKEN
546
+ cu_version: cu118
547
+ name: linux_conda_py311_cu118_pyt220
548
+ python_version: '3.11'
549
+ pytorch_version: 2.2.0
550
+ - binary_linux_conda:
551
+ conda_docker_image: pytorch/conda-builder:cuda121
552
+ context: DOCKERHUB_TOKEN
553
+ cu_version: cu121
554
+ name: linux_conda_py311_cu121_pyt220
555
+ python_version: '3.11'
556
+ pytorch_version: 2.2.0
557
+ - binary_linux_conda:
558
+ conda_docker_image: pytorch/conda-builder:cuda118
559
+ context: DOCKERHUB_TOKEN
560
+ cu_version: cu118
561
+ name: linux_conda_py311_cu118_pyt222
562
+ python_version: '3.11'
563
+ pytorch_version: 2.2.2
564
+ - binary_linux_conda:
565
+ conda_docker_image: pytorch/conda-builder:cuda121
566
+ context: DOCKERHUB_TOKEN
567
+ cu_version: cu121
568
+ name: linux_conda_py311_cu121_pyt222
569
+ python_version: '3.11'
570
+ pytorch_version: 2.2.2
571
+ - binary_linux_conda:
572
+ conda_docker_image: pytorch/conda-builder:cuda118
573
+ context: DOCKERHUB_TOKEN
574
+ cu_version: cu118
575
+ name: linux_conda_py311_cu118_pyt231
576
+ python_version: '3.11'
577
+ pytorch_version: 2.3.1
578
+ - binary_linux_conda:
579
+ conda_docker_image: pytorch/conda-builder:cuda121
580
+ context: DOCKERHUB_TOKEN
581
+ cu_version: cu121
582
+ name: linux_conda_py311_cu121_pyt231
583
+ python_version: '3.11'
584
+ pytorch_version: 2.3.1
585
+ - binary_linux_conda:
586
+ conda_docker_image: pytorch/conda-builder:cuda118
587
+ context: DOCKERHUB_TOKEN
588
+ cu_version: cu118
589
+ name: linux_conda_py311_cu118_pyt240
590
+ python_version: '3.11'
591
+ pytorch_version: 2.4.0
592
+ - binary_linux_conda:
593
+ conda_docker_image: pytorch/conda-builder:cuda121
594
+ context: DOCKERHUB_TOKEN
595
+ cu_version: cu121
596
+ name: linux_conda_py311_cu121_pyt240
597
+ python_version: '3.11'
598
+ pytorch_version: 2.4.0
599
+ - binary_linux_conda:
600
+ conda_docker_image: pytorch/conda-builder:cuda118
601
+ context: DOCKERHUB_TOKEN
602
+ cu_version: cu118
603
+ name: linux_conda_py311_cu118_pyt241
604
+ python_version: '3.11'
605
+ pytorch_version: 2.4.1
606
+ - binary_linux_conda:
607
+ conda_docker_image: pytorch/conda-builder:cuda121
608
+ context: DOCKERHUB_TOKEN
609
+ cu_version: cu121
610
+ name: linux_conda_py311_cu121_pyt241
611
+ python_version: '3.11'
612
+ pytorch_version: 2.4.1
613
+ - binary_linux_conda:
614
+ conda_docker_image: pytorch/conda-builder:cuda118
615
+ context: DOCKERHUB_TOKEN
616
+ cu_version: cu118
617
+ name: linux_conda_py312_cu118_pyt220
618
+ python_version: '3.12'
619
+ pytorch_version: 2.2.0
620
+ - binary_linux_conda:
621
+ conda_docker_image: pytorch/conda-builder:cuda121
622
+ context: DOCKERHUB_TOKEN
623
+ cu_version: cu121
624
+ name: linux_conda_py312_cu121_pyt220
625
+ python_version: '3.12'
626
+ pytorch_version: 2.2.0
627
+ - binary_linux_conda:
628
+ conda_docker_image: pytorch/conda-builder:cuda118
629
+ context: DOCKERHUB_TOKEN
630
+ cu_version: cu118
631
+ name: linux_conda_py312_cu118_pyt222
632
+ python_version: '3.12'
633
+ pytorch_version: 2.2.2
634
+ - binary_linux_conda:
635
+ conda_docker_image: pytorch/conda-builder:cuda121
636
+ context: DOCKERHUB_TOKEN
637
+ cu_version: cu121
638
+ name: linux_conda_py312_cu121_pyt222
639
+ python_version: '3.12'
640
+ pytorch_version: 2.2.2
641
+ - binary_linux_conda:
642
+ conda_docker_image: pytorch/conda-builder:cuda118
643
+ context: DOCKERHUB_TOKEN
644
+ cu_version: cu118
645
+ name: linux_conda_py312_cu118_pyt231
646
+ python_version: '3.12'
647
+ pytorch_version: 2.3.1
648
+ - binary_linux_conda:
649
+ conda_docker_image: pytorch/conda-builder:cuda121
650
+ context: DOCKERHUB_TOKEN
651
+ cu_version: cu121
652
+ name: linux_conda_py312_cu121_pyt231
653
+ python_version: '3.12'
654
+ pytorch_version: 2.3.1
655
+ - binary_linux_conda:
656
+ conda_docker_image: pytorch/conda-builder:cuda118
657
+ context: DOCKERHUB_TOKEN
658
+ cu_version: cu118
659
+ name: linux_conda_py312_cu118_pyt240
660
+ python_version: '3.12'
661
+ pytorch_version: 2.4.0
662
+ - binary_linux_conda:
663
+ conda_docker_image: pytorch/conda-builder:cuda121
664
+ context: DOCKERHUB_TOKEN
665
+ cu_version: cu121
666
+ name: linux_conda_py312_cu121_pyt240
667
+ python_version: '3.12'
668
+ pytorch_version: 2.4.0
669
+ - binary_linux_conda:
670
+ conda_docker_image: pytorch/conda-builder:cuda118
671
+ context: DOCKERHUB_TOKEN
672
+ cu_version: cu118
673
+ name: linux_conda_py312_cu118_pyt241
674
+ python_version: '3.12'
675
+ pytorch_version: 2.4.1
676
+ - binary_linux_conda:
677
+ conda_docker_image: pytorch/conda-builder:cuda121
678
+ context: DOCKERHUB_TOKEN
679
+ cu_version: cu121
680
+ name: linux_conda_py312_cu121_pyt241
681
+ python_version: '3.12'
682
+ pytorch_version: 2.4.1
683
+ - binary_linux_conda_cuda:
684
+ name: testrun_conda_cuda_py310_cu117_pyt201
685
+ context: DOCKERHUB_TOKEN
686
+ python_version: "3.10"
687
+ pytorch_version: '2.0.1'
688
+ cu_version: "cu117"
pytorch3d-0.7.8/.circleci/regenerate.py ADDED
@@ -0,0 +1,183 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
3
+ # All rights reserved.
4
+ #
5
+ # This source code is licensed under the BSD-style license found in the
6
+ # LICENSE file in the root directory of this source tree.
7
+
8
+ """
9
+ This script is adapted from the torchvision one.
10
+ """
11
+
12
+ import os.path
13
+
14
+ import jinja2
15
+ import yaml
16
+ from packaging import version
17
+
18
+
19
# The CUDA versions which have pytorch conda packages available for linux for each
# version of pytorch.
# Keys are pytorch version strings; values are the CUDA tags ("cuXYZ") for which a
# prebuilt pytorch conda package exists, and hence for which we can build pytorch3d.
CONDA_CUDA_VERSIONS = {
    "2.1.0": ["cu118", "cu121"],
    "2.1.1": ["cu118", "cu121"],
    "2.1.2": ["cu118", "cu121"],
    "2.2.0": ["cu118", "cu121"],
    "2.2.2": ["cu118", "cu121"],
    "2.3.1": ["cu118", "cu121"],
    "2.4.0": ["cu118", "cu121"],
    "2.4.1": ["cu118", "cu121"],
}
31
+
32
+
33
def conda_docker_image_for_cuda(cuda_version):
    """Return the conda-builder docker image name for a CUDA version tag.

    Args:
        cuda_version: a tag of the form "cuXYZ", e.g. "cu118".

    Returns:
        The docker image name, e.g. "pytorch/conda-builder:cuda118".

    Raises:
        ValueError: if cuda_version is not in the expected "cuXYZ" form.
    """
    # Validate the prefix as well as the length: the original length-only check
    # would accept any 5-character string (e.g. "abc12").
    if len(cuda_version) != 5 or not cuda_version.startswith("cu"):
        raise ValueError(f"Unknown cuda version: {cuda_version!r}")
    return "pytorch/conda-builder:cuda" + cuda_version[2:]
37
+
38
+
39
def pytorch_versions_for_python(python_version):
    """Return the pytorch versions which support a given python version.

    Args:
        python_version: a string such as "3.10".

    Returns:
        List of pytorch version strings (keys of CONDA_CUDA_VERSIONS) whose
        conda packages exist for that python version.

    Raises:
        ValueError: if python_version is not one we build for.

    The original implementation fell off the end and returned None for an
    unknown python version, which made the caller fail with an opaque
    TypeError; raising here gives a clear error instead.
    """
    if python_version in ["3.8", "3.9"]:
        return list(CONDA_CUDA_VERSIONS)
    # Minimum pytorch version with conda packages for each newer python version.
    minimum_pytorch = {
        "3.10": "1.11.0",
        "3.11": "2.1.0",
        "3.12": "2.2.0",
    }
    if python_version not in minimum_pytorch:
        raise ValueError(f"Unsupported python version: {python_version!r}")
    floor = version.Version(minimum_pytorch[python_version])
    return [i for i in CONDA_CUDA_VERSIONS if version.Version(i) >= floor]
60
+
61
+
62
def workflows(prefix="", filter_branch=None, upload=False, indentation=6):
    """Build the indented YAML snippet listing every linux build job.

    Iterates over every supported (build type, python, pytorch, cuda)
    combination and collects the corresponding workflow entries.
    """
    jobs = []
    build_types = ["conda"]
    python_versions = ["3.8", "3.9", "3.10", "3.11", "3.12"]
    for btype in build_types:
        for py_ver in python_versions:
            for pyt_ver in pytorch_versions_for_python(py_ver):
                for cu_ver in CONDA_CUDA_VERSIONS[pyt_ver]:
                    jobs.extend(
                        workflow_pair(
                            btype=btype,
                            python_version=py_ver,
                            pytorch_version=pyt_ver,
                            cu_version=cu_ver,
                            prefix=prefix,
                            upload=upload,
                            filter_branch=filter_branch,
                        )
                    )
    return indent(indentation, jobs)
79
+
80
+
81
def workflow_pair(
    *,
    btype,
    python_version,
    pytorch_version,
    cu_version,
    prefix="",
    upload=False,
    filter_branch,
):
    """Return the build job (and, if requested, its upload job) for one config.

    The job name encodes the full configuration, e.g.
    "linux_conda_py310_cu118_pyt210".
    """
    py_tag = python_version.replace(".", "")
    pyt_tag = pytorch_version.replace(".", "")
    name = f"{prefix}linux_{btype}_py{py_tag}_{cu_version}_pyt{pyt_tag}"

    jobs = [
        generate_base_workflow(
            base_workflow_name=name,
            python_version=python_version,
            pytorch_version=pytorch_version,
            cu_version=cu_version,
            btype=btype,
            filter_branch=filter_branch,
        )
    ]

    if upload:
        jobs.append(
            generate_upload_workflow(
                base_workflow_name=name,
                btype=btype,
                cu_version=cu_version,
                filter_branch=filter_branch,
            )
        )

    return jobs
119
+
120
+
121
def generate_base_workflow(
    *,
    base_workflow_name,
    python_version,
    cu_version,
    pytorch_version,
    btype,
    filter_branch=None,
):
    """Return the CircleCI job entry for one build configuration."""
    job = {
        "name": base_workflow_name,
        "python_version": python_version,
        "cu_version": cu_version,
        "pytorch_version": pytorch_version,
        "context": "DOCKERHUB_TOKEN",
    }

    image = conda_docker_image_for_cuda(cu_version)
    if image is not None:
        job["conda_docker_image"] = image

    if filter_branch is not None:
        job["filters"] = {"branches": {"only": filter_branch}}

    return {f"binary_linux_{btype}": job}
147
+
148
+
149
def generate_upload_workflow(*, base_workflow_name, btype, cu_version, filter_branch):
    """Return the upload job entry depending on the corresponding build job."""
    job = {
        "name": f"{base_workflow_name}_upload",
        "context": "org-member",
        "requires": [base_workflow_name],
    }

    if btype == "wheel":
        # Wheels are uploaded into a per-CUDA-version subfolder.
        job["subfolder"] = f"{cu_version}/"

    if filter_branch is not None:
        job["filters"] = {"branches": {"only": filter_branch}}

    return {f"binary_{btype}_upload": job}
163
+
164
+
165
def indent(indentation, data_list):
    """Dump data_list as YAML, prefixing every line after the first with spaces.

    Produces text suitable for splicing into the config template at the given
    indentation level. Returns "" for an empty list.
    """
    if not data_list:
        return ""
    dumped = yaml.dump(data_list, default_flow_style=False)
    separator = "\n" + " " * indentation
    return separator.join(dumped.splitlines())
171
+
172
+
173
if __name__ == "__main__":
    # Render config.in.yml (a jinja2 template in this directory) into
    # config.yml, exposing the `workflows` function to the template so it can
    # expand the full job matrix.
    d = os.path.dirname(__file__)
    env = jinja2.Environment(
        loader=jinja2.FileSystemLoader(d),
        lstrip_blocks=True,
        autoescape=False,
        # Preserve the template's final newline in the generated file.
        keep_trailing_newline=True,
    )

    with open(os.path.join(d, "config.yml"), "w") as f:
        f.write(env.get_template("config.in.yml").render(workflows=workflows))
pytorch3d-0.7.8/.clang-format ADDED
@@ -0,0 +1,85 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ AccessModifierOffset: -1
2
+ AlignAfterOpenBracket: AlwaysBreak
3
+ AlignConsecutiveAssignments: false
4
+ AlignConsecutiveDeclarations: false
5
+ AlignEscapedNewlinesLeft: true
6
+ AlignOperands: false
7
+ AlignTrailingComments: false
8
+ AllowAllParametersOfDeclarationOnNextLine: false
9
+ AllowShortBlocksOnASingleLine: false
10
+ AllowShortCaseLabelsOnASingleLine: false
11
+ AllowShortFunctionsOnASingleLine: Empty
12
+ AllowShortIfStatementsOnASingleLine: false
13
+ AllowShortLoopsOnASingleLine: false
14
+ AlwaysBreakAfterReturnType: None
15
+ AlwaysBreakBeforeMultilineStrings: true
16
+ AlwaysBreakTemplateDeclarations: true
17
+ BinPackArguments: false
18
+ BinPackParameters: false
19
+ BraceWrapping:
20
+ AfterClass: false
21
+ AfterControlStatement: false
22
+ AfterEnum: false
23
+ AfterFunction: false
24
+ AfterNamespace: false
25
+ AfterObjCDeclaration: false
26
+ AfterStruct: false
27
+ AfterUnion: false
28
+ BeforeCatch: false
29
+ BeforeElse: false
30
+ IndentBraces: false
31
+ BreakBeforeBinaryOperators: None
32
+ BreakBeforeBraces: Attach
33
+ BreakBeforeTernaryOperators: true
34
+ BreakConstructorInitializersBeforeComma: false
35
+ BreakAfterJavaFieldAnnotations: false
36
+ BreakStringLiterals: false
37
+ ColumnLimit: 80
38
+ CommentPragmas: '^ IWYU pragma:'
39
+ ConstructorInitializerAllOnOneLineOrOnePerLine: true
40
+ ConstructorInitializerIndentWidth: 4
41
+ ContinuationIndentWidth: 4
42
+ Cpp11BracedListStyle: true
43
+ DerivePointerAlignment: false
44
+ DisableFormat: false
45
+ ForEachMacros: [ FOR_EACH, FOR_EACH_R, FOR_EACH_RANGE, ]
46
+ IncludeCategories:
47
+ - Regex: '^<.*\.h(pp)?>'
48
+ Priority: 1
49
+ - Regex: '^<.*'
50
+ Priority: 2
51
+ - Regex: '.*'
52
+ Priority: 3
53
+ IndentCaseLabels: true
54
+ IndentWidth: 2
55
+ IndentWrappedFunctionNames: false
56
+ KeepEmptyLinesAtTheStartOfBlocks: false
57
+ MacroBlockBegin: ''
58
+ MacroBlockEnd: ''
59
+ MaxEmptyLinesToKeep: 1
60
+ NamespaceIndentation: None
61
+ ObjCBlockIndentWidth: 2
62
+ ObjCSpaceAfterProperty: false
63
+ ObjCSpaceBeforeProtocolList: false
64
+ PenaltyBreakBeforeFirstCallParameter: 1
65
+ PenaltyBreakComment: 300
66
+ PenaltyBreakFirstLessLess: 120
67
+ PenaltyBreakString: 1000
68
+ PenaltyExcessCharacter: 1000000
69
+ PenaltyReturnTypeOnItsOwnLine: 200
70
+ PointerAlignment: Left
71
+ ReflowComments: true
72
+ SortIncludes: true
73
+ SpaceAfterCStyleCast: false
74
+ SpaceBeforeAssignmentOperators: true
75
+ SpaceBeforeParens: ControlStatements
76
+ SpaceInEmptyParentheses: false
77
+ SpacesBeforeTrailingComments: 1
78
+ SpacesInAngles: false
79
+ SpacesInContainerLiterals: true
80
+ SpacesInCStyleCastParentheses: false
81
+ SpacesInParentheses: false
82
+ SpacesInSquareBrackets: false
83
+ Standard: Cpp11
84
+ TabWidth: 8
85
+ UseTab: Never
pytorch3d-0.7.8/.flake8 ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ [flake8]
2
+ # B028 No explicit stacklevel argument found.
3
+ # B907 'foo' is manually surrounded by quotes, consider using the `!r` conversion flag.
4
+ # B905 `zip()` without an explicit `strict=` parameter.
5
+ ignore = E203, E266, E501, W503, E221, B028, B905, B907
6
+ max-line-length = 88
7
+ max-complexity = 18
8
+ select = B,C,E,F,W,T4,B9
9
+ exclude = build,__init__.py
pytorch3d-0.7.8/.github/CODE_OF_CONDUCT.md ADDED
@@ -0,0 +1,76 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Code of Conduct
2
+
3
+ ## Our Pledge
4
+
5
+ In the interest of fostering an open and welcoming environment, we as
6
+ contributors and maintainers pledge to make participation in our project and
7
+ our community a harassment-free experience for everyone, regardless of age, body
8
+ size, disability, ethnicity, sex characteristics, gender identity and expression,
9
+ level of experience, education, socio-economic status, nationality, personal
10
+ appearance, race, religion, or sexual identity and orientation.
11
+
12
+ ## Our Standards
13
+
14
+ Examples of behavior that contributes to creating a positive environment
15
+ include:
16
+
17
+ * Using welcoming and inclusive language
18
+ * Being respectful of differing viewpoints and experiences
19
+ * Gracefully accepting constructive criticism
20
+ * Focusing on what is best for the community
21
+ * Showing empathy towards other community members
22
+
23
+ Examples of unacceptable behavior by participants include:
24
+
25
+ * The use of sexualized language or imagery and unwelcome sexual attention or
26
+ advances
27
+ * Trolling, insulting/derogatory comments, and personal or political attacks
28
+ * Public or private harassment
29
+ * Publishing others' private information, such as a physical or electronic
30
+ address, without explicit permission
31
+ * Other conduct which could reasonably be considered inappropriate in a
32
+ professional setting
33
+
34
+ ## Our Responsibilities
35
+
36
+ Project maintainers are responsible for clarifying the standards of acceptable
37
+ behavior and are expected to take appropriate and fair corrective action in
38
+ response to any instances of unacceptable behavior.
39
+
40
+ Project maintainers have the right and responsibility to remove, edit, or
41
+ reject comments, commits, code, wiki edits, issues, and other contributions
42
+ that are not aligned to this Code of Conduct, or to ban temporarily or
43
+ permanently any contributor for other behaviors that they deem inappropriate,
44
+ threatening, offensive, or harmful.
45
+
46
+ ## Scope
47
+
48
+ This Code of Conduct applies within all project spaces, and it also applies when
49
+ an individual is representing the project or its community in public spaces.
50
+ Examples of representing a project or community include using an official
51
+ project e-mail address, posting via an official social media account, or acting
52
+ as an appointed representative at an online or offline event. Representation of
53
+ a project may be further defined and clarified by project maintainers.
54
+
55
+ ## Enforcement
56
+
57
+ Instances of abusive, harassing, or otherwise unacceptable behavior may be
58
+ reported by contacting the project team at <opensource-conduct@fb.com>. All
59
+ complaints will be reviewed and investigated and will result in a response that
60
+ is deemed necessary and appropriate to the circumstances. The project team is
61
+ obligated to maintain confidentiality with regard to the reporter of an incident.
62
+ Further details of specific enforcement policies may be posted separately.
63
+
64
+ Project maintainers who do not follow or enforce the Code of Conduct in good
65
+ faith may face temporary or permanent repercussions as determined by other
66
+ members of the project's leadership.
67
+
68
+ ## Attribution
69
+
70
+ This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
71
+ available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
72
+
73
+ [homepage]: https://www.contributor-covenant.org
74
+
75
+ For answers to common questions about this code of conduct, see
76
+ https://www.contributor-covenant.org/faq
pytorch3d-0.7.8/.github/CONTRIBUTING.md ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Contributing to PyTorch3D
2
+ We want to make contributing to this project as easy and transparent as
3
+ possible.
4
+
5
+ ## Pull Requests
6
+ We actively welcome your pull requests.
7
+
8
+ However, if you're adding any significant features, please make sure to have a corresponding issue to outline your proposal and motivation and allow time for us to give feedback, *before* you send a PR.
9
+ We do not always accept new features, and we take the following factors into consideration:
10
+
11
+ - Whether the same feature can be achieved without modifying PyTorch3D directly. If any aspect of the API is not extensible, please highlight this in an issue so we can work on making this more extensible.
12
+ - Whether the feature is potentially useful to a large audience, or only to a small portion of users.
13
+ - Whether the proposed solution has a good design and interface.
14
+ - Whether the proposed solution adds extra mental/practical overhead to users who don't need such feature.
15
+ - Whether the proposed solution breaks existing APIs.
16
+
17
+ When sending a PR, please ensure you complete the following steps:
18
+
19
+ 1. Fork the repo and create your branch from `main`. Follow the instructions
20
+ in [INSTALL.md](../INSTALL.md) to build the repo.
21
+ 2. If you've added code that should be tested, add tests.
22
+ 3. If you've changed any APIs, please update the documentation.
23
+ 4. Ensure the test suite passes, by running this from the project root:
24
+ ```
25
+ python -m unittest discover -v -s tests -t .
26
+ ```
27
+ 5. Make sure your code lints by running `dev/linter.sh` from the project root.
28
+ 6. If a PR contains multiple orthogonal changes, split it into multiple separate PRs.
29
+ 7. If you haven't already, complete the Contributor License Agreement ("CLA").
30
+
31
+ ## Contributor License Agreement ("CLA")
32
+ In order to accept your pull request, we need you to submit a CLA. You only need
33
+ to do this once to work on any of Facebook's open source projects.
34
+
35
+ Complete your CLA here: <https://code.facebook.com/cla>
36
+
37
+ ## Issues
38
+ We use GitHub issues to track public bugs. Please ensure your description is
39
+ clear and has sufficient instructions to be able to reproduce the issue.
40
+
41
+ Facebook has a [bounty program](https://www.facebook.com/whitehat/) for the safe
42
+ disclosure of security bugs. In those cases, please go through the process
43
+ outlined on that page and do not file a public issue.
44
+
45
+ ## Coding Style
46
+ We follow these [python](http://google.github.io/styleguide/pyguide.html) and [C++](https://google.github.io/styleguide/cppguide.html) style guides.
47
+
48
+ For the linter to work, you will need to install `black`, `flake8`, `usort` and `clang-format`, and
49
+ they need to be fairly up to date.
50
+
51
+ ## License
52
+ By contributing to PyTorch3D, you agree that your contributions will be licensed
53
+ under the LICENSE file in the root directory of this source tree.
pytorch3d-0.7.8/.github/ISSUE_TEMPLATE/bugs.md ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ name: "🐛 Bugs / Unexpected behaviors"
3
+ about: Please report unexpected behaviors or bugs in PyTorch3D.
4
+
5
+ ---
6
+
7
+ If you do not know the root cause of the problem / bug, and wish someone to help you, please
8
+ post according to this template:
9
+
10
+ ## 🐛 Bugs / Unexpected behaviors
11
+ <!-- A clear and concise description of the issue -->
12
+
13
+ NOTE: Please look at the existing list of Issues tagged with the label [`bug`](https://github.com/facebookresearch/pytorch3d/issues?q=label%3Abug). **Only open a new issue if this bug has not already been reported. If an issue already exists, please comment there instead.**
14
+
15
+ ## Instructions To Reproduce the Issue:
16
+
17
+ Please include the following (depending on what the issue is):
18
+
19
+ 1. Any changes you made (`git diff`) or code you wrote
20
+ ```
21
+ <put diff or code here>
22
+ ```
23
+ 2. The exact command(s) you ran:
24
+ 3. What you observed (including the full logs):
25
+ ```
26
+ <put logs here>
27
+ ```
28
+
29
+ Please also simplify the steps as much as possible so they do not require additional resources to
30
+ run, such as a private dataset.
pytorch3d-0.7.8/.github/ISSUE_TEMPLATE/config.yml ADDED
@@ -0,0 +1 @@
 
 
1
+ blank_issues_enabled: false
pytorch3d-0.7.8/.github/ISSUE_TEMPLATE/feature_request.md ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ name: "\U0001F680 Feature Request"
3
+ about: Submit a proposal/request for a new PyTorch3D feature
4
+
5
+ ---
6
+
7
+ ## 🚀 Feature
8
+ <!-- A clear and concise description of the feature proposal -->
9
+
10
+ NOTE: Please look at the existing list of Issues tagged with the label [`enhancement`](https://github.com/facebookresearch/pytorch3d/issues?q=label%3Aenhancement). **Only open a new issue if you do not see your feature request there**.
11
+
12
+ ## Motivation
13
+
14
+ <!-- Please outline the motivation for the proposal.
15
+ e.g. It would be great if I could do [...], I'm always frustrated when [...]. If this is related to another GitHub issue, please link here too -->
16
+
17
+ ## Pitch
18
+
19
+ <!-- A clear and concise description, optionally with code examples showing the functionality you want. -->
20
+
21
+ NOTE: we only consider adding new features if they are useful for many users.
pytorch3d-0.7.8/.github/ISSUE_TEMPLATE/questions-help.md ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ name: "❓ Questions"
3
+ about: How do I do X with PyTorch3D? How does PyTorch3D do X?
4
+
5
+ ---
6
+
7
+ ## ❓ Questions on how to use PyTorch3D
8
+
9
+ <!-- A clear and concise description of the question you need help with. -->
10
+
11
+
12
+ NOTE: Please look at the existing list of Issues tagged with the label [`question`](https://github.com/facebookresearch/pytorch3d/issues?q=label%3Aquestion) or [`how-to`](https://github.com/facebookresearch/pytorch3d/issues?q=label%3A%22how+to%22). **Only open a new issue if you cannot find an answer there**.
13
+
14
+ Also note the following:
15
+
16
+ 1. If you encountered any errors or unexpected issues while using PyTorch3D and need help resolving them,
17
+ please use the "Bugs / Unexpected behaviors" issue template.
18
+
19
+ 2. We do not answer general machine learning / computer vision questions that are not specific to
20
+ PyTorch3D, such as how a model works or what algorithm/methods can be
21
+ used to achieve X.
pytorch3d-0.7.8/.gitignore ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ build/
2
+ dist/
3
+ *.egg-info/
4
+ **/__pycache__/
5
+ *-checkpoint.ipynb
6
+ **/.ipynb_checkpoints
7
+ **/.ipynb_checkpoints/**
8
+
9
+
10
+ # Docusaurus site
11
+ website/yarn.lock
12
+ website/build/
13
+ website/i18n/
14
+ website/node_modules/*
15
+ website/npm-debug.log
16
+
17
+ ## Generated for tutorials
18
+ website/_tutorials/
19
+ website/static/files/
20
+ website/pages/tutorials/*
21
+ !website/pages/tutorials/index.js
pytorch3d-0.7.8/INSTALL.md ADDED
@@ -0,0 +1,157 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Installation
2
+
3
+
4
+ ## Requirements
5
+
6
+ ### Core library
7
+
8
+ The core library is written in PyTorch. Several components have underlying implementation in CUDA for improved performance. A subset of these components have CPU implementations in C++/PyTorch. It is advised to use PyTorch3D with GPU support in order to use all the features.
9
+
10
+ - Linux or macOS or Windows
11
+ - Python
12
+ - PyTorch 2.1.0, 2.1.1, 2.1.2, 2.2.0, 2.2.1, 2.2.2, 2.3.0, 2.3.1, 2.4.0 or 2.4.1.
13
+ - torchvision that matches the PyTorch installation. You can install them together as explained at pytorch.org to make sure of this.
14
+ - gcc & g++ ≥ 4.9
15
+ - [ioPath](https://github.com/facebookresearch/iopath)
16
+ - If CUDA is to be used, use a version which is supported by the corresponding pytorch version and at least version 9.2.
17
+ - If CUDA older than 11.7 is to be used and you are building from source, the CUB library must be available. We recommend version 1.10.0.
18
+
19
+ The runtime dependencies can be installed by running:
20
+ ```
21
+ conda create -n pytorch3d python=3.9
22
+ conda activate pytorch3d
23
+ conda install pytorch=1.13.0 torchvision pytorch-cuda=11.6 -c pytorch -c nvidia
24
+ conda install -c iopath iopath
25
+ ```
26
+
27
+ For the CUB build time dependency, which you only need if you have CUDA older than 11.7, if you are using conda, you can continue with
28
+ ```
29
+ conda install -c bottler nvidiacub
30
+ ```
31
+ Otherwise download the CUB library from https://github.com/NVIDIA/cub/releases and unpack it to a folder of your choice.
32
+ Define the environment variable CUB_HOME before building and point it to the directory that contains `CMakeLists.txt` for CUB.
33
+ For example on Linux/Mac,
34
+ ```
35
+ curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz
36
+ tar xzf 1.10.0.tar.gz
37
+ export CUB_HOME=$PWD/cub-1.10.0
38
+ ```
39
+
40
+ ### Tests/Linting and Demos
41
+
42
+ For developing on top of PyTorch3D or contributing, you will need to run the linter and tests. If you want to run any of the notebook tutorials as `docs/tutorials` or the examples in `docs/examples` you will also need matplotlib and OpenCV.
43
+ - scikit-image
44
+ - black
45
+ - usort
46
+ - flake8
47
+ - matplotlib
48
+ - tqdm
49
+ - jupyter
50
+ - imageio
51
+ - fvcore
52
+ - plotly
53
+ - opencv-python
54
+
55
+ These can be installed by running:
56
+ ```
57
+ # Demos and examples
58
+ conda install jupyter
59
+ pip install scikit-image matplotlib imageio plotly opencv-python
60
+
61
+ # Tests/Linting
62
+ conda install -c fvcore -c conda-forge fvcore
63
+ pip install black usort flake8 flake8-bugbear flake8-comprehensions
64
+ ```
65
+
66
+ ## Installing prebuilt binaries for PyTorch3D
67
+ After installing the above dependencies, run one of the following commands:
68
+
69
+ ### 1. Install with CUDA support from Anaconda Cloud, on Linux only
70
+
71
+ ```
72
+ # Anaconda Cloud
73
+ conda install pytorch3d -c pytorch3d
74
+ ```
75
+
76
+ Or, to install a nightly (non-official, alpha) build:
77
+ ```
78
+ # Anaconda Cloud
79
+ conda install pytorch3d -c pytorch3d-nightly
80
+ ```
81
+
82
+ ### 2. Install wheels for Linux
83
+ We have prebuilt wheels with CUDA for Linux for PyTorch 1.11.0, for each of the supported CUDA versions,
84
+ for Python 3.8 and 3.9. This is for ease of use on Google Colab.
85
+ These are installed in a special way.
86
+ For example, to install for Python 3.8, PyTorch 1.11.0 and CUDA 11.3
87
+ ```
88
+ pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/py38_cu113_pyt1110/download.html
89
+ ```
90
+
91
+ In general, from inside IPython, or in Google Colab or a jupyter notebook, you can install with
92
+ ```
93
+ import sys
94
+ import torch
95
+ pyt_version_str=torch.__version__.split("+")[0].replace(".", "")
96
+ version_str="".join([
97
+ f"py3{sys.version_info.minor}_cu",
98
+ torch.version.cuda.replace(".",""),
99
+ f"_pyt{pyt_version_str}"
100
+ ])
101
+ !pip install iopath
102
+ !pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html
103
+ ```
104
+
105
+ ## Building / installing from source.
106
+ CUDA support will be included if CUDA is available in pytorch or if the environment variable
107
+ `FORCE_CUDA` is set to `1`.
108
+
109
+ ### 1. Install from GitHub
110
+ ```
111
+ pip install "git+https://github.com/facebookresearch/pytorch3d.git"
112
+ ```
113
+ To install using the code of the released version instead of from the main branch, use the following instead.
114
+ ```
115
+ pip install "git+https://github.com/facebookresearch/pytorch3d.git@stable"
116
+ ```
117
+
118
+ For CUDA builds with versions earlier than CUDA 11, set `CUB_HOME` before building as described above.
119
+
120
+ **Install from Github on macOS:**
121
+ Some environment variables should be provided, like this.
122
+ ```
123
+ MACOSX_DEPLOYMENT_TARGET=10.14 CC=clang CXX=clang++ pip install "git+https://github.com/facebookresearch/pytorch3d.git"
124
+ ```
125
+
126
+ ### 2. Install from a local clone
127
+ ```
128
+ git clone https://github.com/facebookresearch/pytorch3d.git
129
+ cd pytorch3d && pip install -e .
130
+ ```
131
+ To rebuild after installing from a local clone run, `rm -rf build/ **/*.so` then `pip install -e .`. You often need to rebuild pytorch3d after reinstalling PyTorch. For CUDA builds with versions earlier than CUDA 11, set `CUB_HOME` before building as described above.
132
+
133
+ **Install from local clone on macOS:**
134
+ ```
135
+ MACOSX_DEPLOYMENT_TARGET=10.14 CC=clang CXX=clang++ pip install -e .
136
+ ```
137
+
138
+ **Install from local clone on Windows:**
139
+
140
+ Depending on the version of PyTorch, changes to some PyTorch headers may be needed before compilation. These are often discussed in issues in this repository.
141
+
142
+ After any necessary patching, you can go to "x64 Native Tools Command Prompt for VS 2019" to compile and install
143
+ ```
144
+ cd pytorch3d
145
+ python3 setup.py install
146
+ ```
147
+
148
+ After installing, you can run **unit tests**
149
+ ```
150
+ python3 -m unittest discover -v -s tests -t .
151
+ ```
152
+
153
+ # FAQ
154
+
155
+ ### Can I use Docker?
156
+
157
+ We don't provide a docker file but see [#113](https://github.com/facebookresearch/pytorch3d/issues/113) for a docker file shared by a user (NOTE: this has not been tested by the PyTorch3D team).
pytorch3d-0.7.8/LICENSE ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ BSD License
2
+
3
+ For PyTorch3D software
4
+
5
+ Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved.
6
+
7
+ Redistribution and use in source and binary forms, with or without modification,
8
+ are permitted provided that the following conditions are met:
9
+
10
+ * Redistributions of source code must retain the above copyright notice, this
11
+ list of conditions and the following disclaimer.
12
+
13
+ * Redistributions in binary form must reproduce the above copyright notice,
14
+ this list of conditions and the following disclaimer in the documentation
15
+ and/or other materials provided with the distribution.
16
+
17
+ * Neither the name Meta nor the names of its contributors may be used to
18
+ endorse or promote products derived from this software without specific
19
+ prior written permission.
20
+
21
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
22
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
23
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
24
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
25
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
28
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
30
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
pytorch3d-0.7.8/LICENSE-3RD-PARTY ADDED
@@ -0,0 +1,71 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ SRN license ( https://github.com/vsitzmann/scene-representation-networks/ ):
2
+
3
+ MIT License
4
+
5
+ Copyright (c) 2019 Vincent Sitzmann
6
+
7
+ Permission is hereby granted, free of charge, to any person obtaining a copy
8
+ of this software and associated documentation files (the "Software"), to deal
9
+ in the Software without restriction, including without limitation the rights
10
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11
+ copies of the Software, and to permit persons to whom the Software is
12
+ furnished to do so, subject to the following conditions:
13
+
14
+ The above copyright notice and this permission notice shall be included in all
15
+ copies or substantial portions of the Software.
16
+
17
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
20
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
23
+ SOFTWARE.
24
+
25
+
26
+ IDR license ( github.com/lioryariv/idr ):
27
+
28
+ MIT License
29
+
30
+ Copyright (c) 2020 Lior Yariv
31
+
32
+ Permission is hereby granted, free of charge, to any person obtaining a copy
33
+ of this software and associated documentation files (the "Software"), to deal
34
+ in the Software without restriction, including without limitation the rights
35
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
36
+ copies of the Software, and to permit persons to whom the Software is
37
+ furnished to do so, subject to the following conditions:
38
+
39
+ The above copyright notice and this permission notice shall be included in all
40
+ copies or substantial portions of the Software.
41
+
42
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
43
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
44
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
45
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
46
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
47
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
48
+ SOFTWARE.
49
+
50
+
51
+ NeRF https://github.com/bmild/nerf/
52
+
53
+ Copyright (c) 2020 bmild
54
+
55
+ Permission is hereby granted, free of charge, to any person obtaining a copy
56
+ of this software and associated documentation files (the "Software"), to deal
57
+ in the Software without restriction, including without limitation the rights
58
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
59
+ copies of the Software, and to permit persons to whom the Software is
60
+ furnished to do so, subject to the following conditions:
61
+
62
+ The above copyright notice and this permission notice shall be included in all
63
+ copies or substantial portions of the Software.
64
+
65
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
66
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
67
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
68
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
69
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
70
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
71
+ SOFTWARE.
pytorch3d-0.7.8/README.md ADDED
@@ -0,0 +1,183 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <img src="https://raw.githubusercontent.com/facebookresearch/pytorch3d/main/.github/pytorch3dlogo.png" width="900"/>
2
+
3
+ [![CircleCI](https://circleci.com/gh/facebookresearch/pytorch3d.svg?style=svg)](https://circleci.com/gh/facebookresearch/pytorch3d)
4
+ [![Anaconda-Server Badge](https://anaconda.org/pytorch3d/pytorch3d/badges/version.svg)](https://anaconda.org/pytorch3d/pytorch3d)
5
+
6
+ # Introduction
7
+
8
+ PyTorch3D provides efficient, reusable components for 3D Computer Vision research with [PyTorch](https://pytorch.org).
9
+
10
+ Key features include:
11
+
12
+ - Data structure for storing and manipulating triangle meshes
13
+ - Efficient operations on triangle meshes (projective transformations, graph convolution, sampling, loss functions)
14
+ - A differentiable mesh renderer
15
+ - Implicitron, see [its README](projects/implicitron_trainer), a framework for new-view synthesis via implicit representations. ([blog post](https://ai.facebook.com/blog/implicitron-a-new-modular-extensible-framework-for-neural-implicit-representations-in-pytorch3d/))
16
+
17
+ PyTorch3D is designed to integrate smoothly with deep learning methods for predicting and manipulating 3D data.
18
+ For this reason, all operators in PyTorch3D:
19
+
20
+ - Are implemented using PyTorch tensors
21
+ - Can handle minibatches of heterogeneous data
22
+ - Can be differentiated
23
+ - Can utilize GPUs for acceleration
24
+
25
+ Within FAIR, PyTorch3D has been used to power research projects such as [Mesh R-CNN](https://arxiv.org/abs/1906.02739).
26
+
27
+ See our [blog post](https://ai.facebook.com/blog/-introducing-pytorch3d-an-open-source-library-for-3d-deep-learning/) to see more demos and learn about PyTorch3D.
28
+
29
+ ## Installation
30
+
31
+ For detailed instructions refer to [INSTALL.md](INSTALL.md).
32
+
33
+ ## License
34
+
35
+ PyTorch3D is released under the [BSD License](LICENSE).
36
+
37
+ ## Tutorials
38
+
39
+ Get started with PyTorch3D by trying one of the tutorial notebooks.
40
+
41
+ |<img src="https://raw.githubusercontent.com/facebookresearch/pytorch3d/main/.github/dolphin_deform.gif" width="310"/>|<img src="https://raw.githubusercontent.com/facebookresearch/pytorch3d/main/.github/bundle_adjust.gif" width="310"/>|
42
+ |:-----------------------------------------------------------------------------------------------------------:|:--------------------------------------------------:|
43
+ | [Deform a sphere mesh to dolphin](https://github.com/facebookresearch/pytorch3d/blob/main/docs/tutorials/deform_source_mesh_to_target_mesh.ipynb)| [Bundle adjustment](https://github.com/facebookresearch/pytorch3d/blob/main/docs/tutorials/bundle_adjustment.ipynb) |
44
+
45
+ | <img src="https://raw.githubusercontent.com/facebookresearch/pytorch3d/main/.github/render_textured_mesh.gif" width="310"/> | <img src="https://raw.githubusercontent.com/facebookresearch/pytorch3d/main/.github/camera_position_teapot.gif" width="310" height="310"/>
46
+ |:------------------------------------------------------------:|:--------------------------------------------------:|
47
+ | [Render textured meshes](https://github.com/facebookresearch/pytorch3d/blob/main/docs/tutorials/render_textured_meshes.ipynb)| [Camera position optimization](https://github.com/facebookresearch/pytorch3d/blob/main/docs/tutorials/camera_position_optimization_with_differentiable_rendering.ipynb)|
48
+
49
+ | <img src="https://raw.githubusercontent.com/facebookresearch/pytorch3d/main/.github/pointcloud_render.png" width="310"/> | <img src="https://raw.githubusercontent.com/facebookresearch/pytorch3d/main/.github/cow_deform.gif" width="310" height="310"/>
50
+ |:------------------------------------------------------------:|:--------------------------------------------------:|
51
+ | [Render textured pointclouds](https://github.com/facebookresearch/pytorch3d/blob/main/docs/tutorials/render_colored_points.ipynb)| [Fit a mesh with texture](https://github.com/facebookresearch/pytorch3d/blob/main/docs/tutorials/fit_textured_mesh.ipynb)|
52
+
53
+ | <img src="https://raw.githubusercontent.com/facebookresearch/pytorch3d/main/.github/densepose_render.png" width="310"/> | <img src="https://raw.githubusercontent.com/facebookresearch/pytorch3d/main/.github/shapenet_render.png" width="310" height="310"/>
54
+ |:------------------------------------------------------------:|:--------------------------------------------------:|
55
+ | [Render DensePose data](https://github.com/facebookresearch/pytorch3d/blob/main/docs/tutorials/render_densepose.ipynb)| [Load & Render ShapeNet data](https://github.com/facebookresearch/pytorch3d/blob/main/docs/tutorials/dataloaders_ShapeNetCore_R2N2.ipynb)|
56
+
57
+ | <img src="https://raw.githubusercontent.com/facebookresearch/pytorch3d/main/.github/fit_textured_volume.gif" width="310"/> | <img src="https://raw.githubusercontent.com/facebookresearch/pytorch3d/main/.github/fit_nerf.gif" width="310" height="310"/>
58
+ |:------------------------------------------------------------:|:--------------------------------------------------:|
59
+ | [Fit Textured Volume](https://github.com/facebookresearch/pytorch3d/blob/main/docs/tutorials/fit_textured_volume.ipynb)| [Fit A Simple Neural Radiance Field](https://github.com/facebookresearch/pytorch3d/blob/main/docs/tutorials/fit_simple_neural_radiance_field.ipynb)|
60
+
61
+ | <img src="https://raw.githubusercontent.com/facebookresearch/pytorch3d/main/.github/fit_textured_volume.gif" width="310"/> | <img src="https://raw.githubusercontent.com/facebookresearch/pytorch3d/main/.github/implicitron_config.gif" width="310" height="310"/>
62
+ |:------------------------------------------------------------:|:--------------------------------------------------:|
63
+ | [Fit Textured Volume in Implicitron](https://github.com/facebookresearch/pytorch3d/blob/main/docs/tutorials/implicitron_volumes.ipynb)| [Implicitron Config System](https://github.com/facebookresearch/pytorch3d/blob/main/docs/tutorials/implicitron_config_system.ipynb)|
64
+
65
+
66
+
67
+
68
+
69
+ ## Documentation
70
+
71
+ Learn more about the API by reading the PyTorch3D [documentation](https://pytorch3d.readthedocs.org/).
72
+
73
+ We also have deep dive notes on several API components:
74
+
75
+ - [Heterogeneous Batching](https://github.com/facebookresearch/pytorch3d/tree/main/docs/notes/batching.md)
76
+ - [Mesh IO](https://github.com/facebookresearch/pytorch3d/tree/main/docs/notes/meshes_io.md)
77
+ - [Differentiable Rendering](https://github.com/facebookresearch/pytorch3d/tree/main/docs/notes/renderer_getting_started.md)
78
+
79
+ ### Overview Video
80
+
81
+ We have created a short (~14 min) video tutorial providing an overview of the PyTorch3D codebase including several code examples. Click on the image below to watch the video on YouTube:
82
+
83
+ <a href="http://www.youtube.com/watch?v=Pph1r-x9nyY"><img src="http://img.youtube.com/vi/Pph1r-x9nyY/0.jpg" height="225" ></a>
84
+
85
+ ## Development
86
+
87
+ We welcome new contributions to PyTorch3D and we will be actively maintaining this library! Please refer to [CONTRIBUTING.md](./.github/CONTRIBUTING.md) for full instructions on how to run the code, tests and linter, and submit your pull requests.
88
+
89
+ ## Development and Compatibility
90
+
91
+ - `main` branch: actively developed, without any guarantee. Anything can be broken at any time
92
+ - REMARK: this includes nightly builds which are built from `main`
93
+ - HINT: the commit history can help locate regressions or changes
94
+ - backward-compatibility between releases: no guarantee. Best efforts to communicate breaking changes and facilitate migration of code or data (incl. models).
95
+
96
+ ## Contributors
97
+
98
+ PyTorch3D is written and maintained by the Facebook AI Research Computer Vision Team.
99
+
100
+ In alphabetical order:
101
+
102
+ * Amitav Baruah
103
+ * Steve Branson
104
+ * Krzysztof Chalupka
105
+ * Jiali Duan
106
+ * Luya Gao
107
+ * Georgia Gkioxari
108
+ * Taylor Gordon
109
+ * Justin Johnson
110
+ * Patrick Labatut
111
+ * Christoph Lassner
112
+ * Wan-Yen Lo
113
+ * David Novotny
114
+ * Nikhila Ravi
115
+ * Jeremy Reizenstein
116
+ * Dave Schnizlein
117
+ * Roman Shapovalov
118
+ * Olivia Wiles
119
+
120
+ ## Citation
121
+
122
+ If you find PyTorch3D useful in your research, please cite our tech report:
123
+
124
+ ```bibtex
125
+ @article{ravi2020pytorch3d,
126
+ author = {Nikhila Ravi and Jeremy Reizenstein and David Novotny and Taylor Gordon
127
+ and Wan-Yen Lo and Justin Johnson and Georgia Gkioxari},
128
+ title = {Accelerating 3D Deep Learning with PyTorch3D},
129
+ journal = {arXiv:2007.08501},
130
+ year = {2020},
131
+ }
132
+ ```
133
+
134
+ If you are using the pulsar backend for sphere-rendering (the `PulsarPointRenderer` or `pytorch3d.renderer.points.pulsar.Renderer`), please cite the tech report:
135
+
136
+ ```bibtex
137
+ @article{lassner2020pulsar,
138
+ author = {Christoph Lassner and Michael Zollh\"ofer},
139
+ title = {Pulsar: Efficient Sphere-based Neural Rendering},
140
+ journal = {arXiv:2004.07484},
141
+ year = {2020},
142
+ }
143
+ ```
144
+
145
+ ## News
146
+
147
+ Please see below for a timeline of the codebase updates in reverse chronological order. We are sharing updates on the releases as well as research projects which are built with PyTorch3D. The changelogs for the releases are available under [`Releases`](https://github.com/facebookresearch/pytorch3d/releases), and the builds can be installed using `conda` as per the instructions in [INSTALL.md](INSTALL.md).
148
+
149
+ **[Oct 31st 2023]:** PyTorch3D [v0.7.5](https://github.com/facebookresearch/pytorch3d/releases/tag/v0.7.5) released.
150
+
151
+ **[May 10th 2023]:** PyTorch3D [v0.7.4](https://github.com/facebookresearch/pytorch3d/releases/tag/v0.7.4) released.
152
+
153
+ **[Apr 5th 2023]:** PyTorch3D [v0.7.3](https://github.com/facebookresearch/pytorch3d/releases/tag/v0.7.3) released.
154
+
155
+ **[Dec 19th 2022]:** PyTorch3D [v0.7.2](https://github.com/facebookresearch/pytorch3d/releases/tag/v0.7.2) released.
156
+
157
+ **[Oct 23rd 2022]:** PyTorch3D [v0.7.1](https://github.com/facebookresearch/pytorch3d/releases/tag/v0.7.1) released.
158
+
159
+ **[Aug 10th 2022]:** PyTorch3D [v0.7.0](https://github.com/facebookresearch/pytorch3d/releases/tag/v0.7.0) released with Implicitron and MeshRasterizerOpenGL.
160
+
161
+ **[Apr 28th 2022]:** PyTorch3D [v0.6.2](https://github.com/facebookresearch/pytorch3d/releases/tag/v0.6.2) released
162
+
163
+ **[Dec 16th 2021]:** PyTorch3D [v0.6.1](https://github.com/facebookresearch/pytorch3d/releases/tag/v0.6.1) released
164
+
165
+ **[Oct 6th 2021]:** PyTorch3D [v0.6.0](https://github.com/facebookresearch/pytorch3d/releases/tag/v0.6.0) released
166
+
167
+ **[Aug 5th 2021]:** PyTorch3D [v0.5.0](https://github.com/facebookresearch/pytorch3d/releases/tag/v0.5.0) released
168
+
169
+ **[Feb 9th 2021]:** PyTorch3D [v0.4.0](https://github.com/facebookresearch/pytorch3d/releases/tag/v0.4.0) released with support for implicit functions, volume rendering and a [reimplementation of NeRF](https://github.com/facebookresearch/pytorch3d/tree/main/projects/nerf).
170
+
171
+ **[November 2nd 2020]:** PyTorch3D [v0.3.0](https://github.com/facebookresearch/pytorch3d/releases/tag/v0.3.0) released, integrating the pulsar backend.
172
+
173
+ **[Aug 28th 2020]:** PyTorch3D [v0.2.5](https://github.com/facebookresearch/pytorch3d/releases/tag/v0.2.5) released
174
+
175
+ **[July 17th 2020]:** PyTorch3D tech report published on ArXiv: https://arxiv.org/abs/2007.08501
176
+
177
+ **[April 24th 2020]:** PyTorch3D [v0.2.0](https://github.com/facebookresearch/pytorch3d/releases/tag/v0.2.0) released
178
+
179
+ **[March 25th 2020]:** [SynSin](https://arxiv.org/abs/1912.08804) codebase released using PyTorch3D: https://github.com/facebookresearch/synsin
180
+
181
+ **[March 8th 2020]:** PyTorch3D [v0.1.1](https://github.com/facebookresearch/pytorch3d/releases/tag/v0.1.1) bug fix release
182
+
183
+ **[Jan 23rd 2020]:** PyTorch3D [v0.1.0](https://github.com/facebookresearch/pytorch3d/releases/tag/v0.1.0) released. [Mesh R-CNN](https://arxiv.org/abs/1906.02739) codebase released: https://github.com/facebookresearch/meshrcnn
pytorch3d-0.7.8/dev/linter.sh ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash -e
2
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
3
+ # All rights reserved.
4
+ #
5
+ # This source code is licensed under the BSD-style license found in the
6
+ # LICENSE file in the root directory of this source tree.
7
+
8
+ # Run this script at project root by "./dev/linter.sh" before you commit
9
+
10
+ DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
11
+ DIR=$(dirname "${DIR}")
12
+
13
+ if [[ -f "${DIR}/TARGETS" ]]
14
+ then
15
+ pyfmt "${DIR}"
16
+ else
17
+ # run usort externally only
18
+ echo "Running usort..."
19
+ usort "${DIR}"
20
+ fi
21
+
22
+ echo "Running black..."
23
+ black "${DIR}"
24
+
25
+ echo "Running flake..."
26
+ flake8 "${DIR}" || true
27
+
28
+ echo "Running clang-format ..."
29
+ clangformat=$(command -v clang-format-8 || echo clang-format)
30
+ find "${DIR}" -regex ".*\.\(cpp\|c\|cc\|cu\|cuh\|cxx\|h\|hh\|hpp\|hxx\|tcc\|mm\|m\)" -print0 | xargs -0 "${clangformat}" -i
31
+
32
+ # Run arc and pyre internally only.
33
+ if [[ -f "${DIR}/TARGETS" ]]
34
+ then
35
+ (cd "${DIR}"; command -v arc > /dev/null && arc lint) || true
36
+
37
+ echo "Running pyre..."
38
+ echo "To restart/kill pyre server, run 'pyre restart' or 'pyre kill' in fbcode/"
39
+ ( cd ~/fbsource/fbcode; pyre -l vision/fair/pytorch3d/ )
40
+ fi
pytorch3d-0.7.8/dev/run_tutorials.sh ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/bash
2
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
3
+ # All rights reserved.
4
+ #
5
+ # This source code is licensed under the BSD-style license found in the
6
+ # LICENSE file in the root directory of this source tree.
7
+
8
+ # This script is for running some of the tutorials using the nightly build in
9
+ # an isolated environment. It is designed to be run in docker.
10
+
11
+ # If you run this script in this directory with
12
+ # sudo docker run --runtime=nvidia -it --rm -v $PWD/../docs/tutorials:/notebooks -v $PWD:/loc pytorch/conda-cuda bash /loc/run_tutorials.sh | tee log.txt
13
+ # it should execute some tutorials with the nightly build and resave them, and
14
+ # save a log in the current directory.
15
+
16
+ # We use nbconvert. runipy would be an alternative but it currently doesn't
17
+ # work well with plotly.
18
+
19
+ set -e
20
+
21
+ conda init bash
22
+ # shellcheck source=/dev/null
23
+ source ~/.bashrc
24
+ conda create -y -n myenv python=3.8 matplotlib ipython ipywidgets nbconvert
25
+ conda activate myenv
26
+ conda install -y -c iopath iopath
27
+ conda install -y -c pytorch pytorch=1.6.0 cudatoolkit=10.1 torchvision
28
+ conda install -y -c pytorch3d-nightly pytorch3d
29
+ pip install plotly scikit-image
30
+
31
+ for notebook in /notebooks/*.ipynb
32
+ do
33
+ name=$(basename "$notebook")
34
+
35
+ if [[ "$name" == "dataloaders_ShapeNetCore_R2N2.ipynb" ]]
36
+ then
37
+ #skip as data not easily available
38
+ continue
39
+ fi
40
+ if [[ "$name" == "render_densepose.ipynb" ]]
41
+ then
42
+ #skip as data not easily available
43
+ continue
44
+ fi
45
+
46
+ #comment the lines which install torch, torchvision and pytorch3d
47
+ sed -Ei '/(torchvision)|(pytorch3d)/ s/!pip/!#pip/' "$notebook"
48
+ #Don't let tqdm use widgets
49
+ sed -i 's/from tqdm.notebook import tqdm/from tqdm import tqdm/' "$notebook"
50
+
51
+ echo
52
+ echo "### ### ###"
53
+ echo "starting $name"
54
+ time jupyter nbconvert --to notebook --inplace --ExecutePreprocessor.kernel_name=python3 --execute "$notebook" || true
55
+ echo "ending $name"
56
+ done
pytorch3d-0.7.8/dev/test_list.py ADDED
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import ast
8
+ from pathlib import Path
9
+ from typing import List
10
+
11
+
12
+ """
13
+ This module outputs a list of tests for completion.
14
+ It has no dependencies.
15
+ """
16
+
17
+
18
+ def get_test_files() -> List[Path]:
19
+ root = Path(__file__).parent.parent
20
+ dirs = ["tests", "projects/implicitron_trainer"]
21
+ return [i for dir in dirs for i in (root / dir).glob("**/test*.py")]
22
+
23
+
24
+ def tests_from_file(path: Path, base: str) -> List[str]:
25
+ """
26
+ Returns all the tests in the given file, in format
27
+ expected as arguments when running the tests.
28
+ e.g.
29
+ file_stem
30
+ file_stem.TestFunctionality
31
+ file_stem.TestFunctionality.test_f
32
+ file_stem.TestFunctionality.test_g
33
+ """
34
+ with open(path) as f:
35
+ node = ast.parse(f.read())
36
+ out = [base]
37
+ for cls in node.body:
38
+ if not isinstance(cls, ast.ClassDef):
39
+ continue
40
+ if not cls.name.startswith("Test"):
41
+ continue
42
+ class_base = base + "." + cls.name
43
+ out.append(class_base)
44
+ for method in cls.body:
45
+ if not isinstance(method, ast.FunctionDef):
46
+ continue
47
+ if not method.name.startswith("test"):
48
+ continue
49
+ out.append(class_base + "." + method.name)
50
+ return out
51
+
52
+
53
+ def main() -> None:
54
+ files = get_test_files()
55
+ test_root = Path(__file__).parent.parent
56
+ all_tests = []
57
+ for f in files:
58
+ file_base = str(f.relative_to(test_root))[:-3].replace("/", ".")
59
+ all_tests.extend(tests_from_file(f, file_base))
60
+ for test in sorted(all_tests):
61
+ print(test)
62
+
63
+
64
+ if __name__ == "__main__":
65
+ main()
pytorch3d-0.7.8/docs/.gitignore ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ source
2
+ _build
3
+ _static
4
+ _template
5
+ *-checkpoint.ipynb
6
+ .ipynb_checkpoints
7
+ .ipynb_checkpoints/**
pytorch3d-0.7.8/docs/.readthedocs.yaml ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ # Read the Docs configuration file
8
+ # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
9
+
10
+ # Required
11
+ version: 2
12
+
13
+ # Set the version of Python and other tools you might need
14
+ build:
15
+ os: ubuntu-22.04
16
+ tools:
17
+ python: "3.11"
18
+
19
+ # Build documentation in the docs/ directory with Sphinx
20
+ sphinx:
21
+ configuration: docs/conf.py
22
+
23
+ # We recommend specifying your dependencies to enable reproducible builds:
24
+ # https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html
25
+ python:
26
+ install:
27
+ - requirements: docs/requirements.txt
pytorch3d-0.7.8/docs/Makefile ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ # Minimal makefile for Sphinx documentation
8
+
9
+ # You can set these variables from the command line, and also
10
+ # from the environment for the first two.
11
+ SPHINXOPTS =
12
+ SPHINXBUILD = sphinx-build
13
+ SOURCEDIR = .
14
+ BUILDDIR = _build
15
+
16
+ # Put it first so that "make" without argument is like "make help".
17
+ help:
18
+ @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
19
+
20
+ .PHONY: help Makefile
21
+
22
+ # Catch-all target: route all unknown targets to Sphinx using the new
23
+ # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
24
+ %: Makefile
25
+ @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
pytorch3d-0.7.8/docs/README.md ADDED
@@ -0,0 +1,77 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ## Setup
2
+
3
+ ### Install dependencies
4
+
5
+ ```
6
+ pip install -U recommonmark sphinx sphinx_rtd_theme sphinx_markdown_tables
7
+ ```
8
+
9
+ ### Add symlink to the root README.md
10
+
11
+ We want to include the root readme as an overview. Before generating the docs create a symlink to the root readme.
12
+
13
+ ```
14
+ cd docs
15
+ ln -s ../README.md overview.md
16
+ ```
17
+
18
+ In `conf.py` for deployment this is done using `subprocess.call`.
19
+
20
+ ### Add a new file
21
+
22
+ Add a new `.md` or `.rst` file and add the name to the doc tree in `index.rst` e.g
23
+
24
+ ```
25
+ .. toctree::
26
+ :maxdepth: 1
27
+ :caption: Intro Documentation
28
+
29
+ overview
30
+ ```
31
+
32
+ To autogenerate docs from docstrings in the source code, add the import path for the function e.g.
33
+
34
+ ```
35
+ Chamfer Loss
36
+ --------------------
37
+
38
+ .. autoclass:: loss.chamfer.chamfer_distance
39
+ :members:
40
+ :undoc-members:
41
+
42
+ .. automethod:: __init__
43
+
44
+ ```
45
+
46
+ ### Build
47
+
48
+ From `pytorch3d/docs` run:
49
+
50
+ ```
51
+ > make html
52
+ ```
53
+
54
+ The website is generated in `_build/html`.
55
+
56
+ ### Common Issues
57
+
58
+ Sphinx can be fussy, and sometimes about things you weren’t expecting. For example, you might encounter something like:
59
+
60
+ WARNING: toctree contains reference to nonexisting document u'overview'
61
+ ...
62
+ checking consistency...
63
+ <pytorch3d>/docs/overview.rst::
64
+ WARNING: document isn't included in any toctree
65
+
66
+ You might have indented overview in the .. toctree:: in index.rst with four spaces, when Sphinx is expecting three.
67
+
68
+
69
+ ### View
70
+
71
+ Start a python simple server:
72
+
73
+ ```
74
+ > python -m http.server
75
+ ```
76
+
77
+ Navigate to: `http://0.0.0.0:8000/`
pytorch3d-0.7.8/docs/conf.py ADDED
@@ -0,0 +1,198 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
3
+ # All rights reserved.
4
+ #
5
+ # This source code is licensed under the BSD-style license found in the
6
+ # LICENSE file in the root directory of this source tree.
7
+
8
+ # flake8: noqa
9
+
10
+ # Configuration file for the Sphinx documentation builder.
11
+ #
12
+ # This file only contains a selection of the most common options. For a full
13
+ # list see the documentation:
14
+ # https://www.sphinx-doc.org/en/master/usage/configuration.html
15
+
16
+ # If extensions (or modules to document with autodoc) are in another directory,
17
+ # add these directories to sys.path here. If the directory is relative to the
18
+ # documentation root, use os.path.abspath to make it absolute, like shown here.
19
+ #
20
+ import os
21
+ import sys
22
+
23
+ import unittest.mock as mock
24
+
25
+ from recommonmark.parser import CommonMarkParser
26
+ from recommonmark.states import DummyStateMachine
27
+ from sphinx.builders.html import StandaloneHTMLBuilder
28
+ from sphinx.ext.autodoc import between
29
+
30
+
31
+ # Monkey patch to fix recommonmark 0.4 doc reference issues.
32
+ orig_run_role = DummyStateMachine.run_role
33
+
34
+
35
+ def run_role(self, name, options=None, content=None):
36
+ if name == "doc":
37
+ name = "any"
38
+ return orig_run_role(self, name, options, content)
39
+
40
+
41
+ DummyStateMachine.run_role = run_role
42
+
43
+
44
+ StandaloneHTMLBuilder.supported_image_types = [
45
+ "image/svg+xml",
46
+ "image/gif",
47
+ "image/png",
48
+ "image/jpeg",
49
+ ]
50
+
51
+ # -- Path setup --------------------------------------------------------------
52
+
53
+
54
+ sys.path.insert(0, os.path.abspath("../"))
55
+ sys.path.insert(0, os.path.abspath("../pytorch3d"))
56
+ sys.path.insert(0, os.path.abspath("../../"))
57
+
58
+ DEPLOY = os.environ.get("READTHEDOCS") == "True"
59
+ needs_sphinx = "1.7"
60
+
61
+
62
+ try:
63
+ import torch # noqa
64
+ except ImportError:
65
+ for m in [
66
+ "torch",
67
+ "torchvision",
68
+ "torch.nn",
69
+ "torch.autograd",
70
+ "torch.autograd.function",
71
+ "torch.nn.modules",
72
+ "torch.nn.modules.utils",
73
+ "torch.utils",
74
+ "torch.utils.data",
75
+ "torchvision",
76
+ "torchvision.ops",
77
+ ]:
78
+ sys.modules[m] = mock.Mock(name=m)
79
+
80
+ for m in ["cv2", "scipy", "numpy", "pytorch3d._C", "np.eye", "np.zeros"]:
81
+ sys.modules[m] = mock.Mock(name=m)
82
+
83
+ # -- Project information -----------------------------------------------------
84
+
85
+ project = "PyTorch3D"
86
+ copyright = "Meta Platforms, Inc"
87
+ author = "facebookresearch"
88
+
89
+ # The short X.Y version
90
+ version = ""
91
+
92
+ # The full version, including alpha/beta/rc tags
93
+ release = version
94
+
95
+ # -- General configuration ---------------------------------------------------
96
+
97
+ # Add any Sphinx extension module names here, as strings. They can be
98
+ # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
99
+ # ones.
100
+
101
+ extensions = [
102
+ "sphinx_markdown_tables",
103
+ "sphinx.ext.autodoc",
104
+ "sphinx.ext.mathjax",
105
+ "sphinx.ext.napoleon",
106
+ "sphinx.ext.intersphinx",
107
+ "sphinx.ext.todo",
108
+ "sphinx.ext.coverage",
109
+ "sphinx.ext.viewcode",
110
+ "sphinx.ext.githubpages",
111
+ ]
112
+
113
+ # -- Configurations for plugins ------------
114
+ napoleon_google_docstring = True
115
+ napoleon_include_init_with_doc = True
116
+ napoleon_include_special_with_doc = True
117
+ napoleon_numpy_docstring = False
118
+ # napoleon_use_param = False
119
+ napoleon_use_rtype = False
120
+ autodoc_inherit_docstrings = False
121
+ autodoc_member_order = "bysource"
122
+
123
+ source_parsers = {".md": CommonMarkParser}
124
+
125
+
126
+ # The suffix(es) of source filenames.
127
+ # You can specify multiple suffix as a list of string:
128
+ #
129
+ source_suffix = [".rst", ".md"]
130
+
131
+ # The master toctree document.
132
+ master_doc = "index"
133
+
134
+ # Add any paths that contain templates here, relative to this directory.
135
+ templates_path = ["_templates"]
136
+
137
+ # List of patterns, relative to source directory, that match files and
138
+ # directories to ignore when looking for source files.
139
+ # This pattern also affects html_static_path and html_extra_path.
140
+ exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", "build", "README.md"]
141
+
142
+ # The name of the Pygments (syntax highlighting) style to use.
143
+ pygments_style = "sphinx"
144
+
145
+ # -- Options for HTML output -------------------------------------------------
146
+
147
+ # The theme to use for HTML and HTML Help pages. See the documentation for
148
+ # a list of builtin themes.
149
+ #
150
+ html_theme = "sphinx_rtd_theme"
151
+
152
+ # Add any paths that contain custom static files (such as style sheets) here,
153
+ # relative to this directory. They are copied after the builtin static files,
154
+ # so a file named "default.css" will overwrite the builtin "default.css".
155
+ html_static_path = ["_static"]
156
+
157
+ html_theme_options = {"collapse_navigation": True}
158
+
159
+
160
+ def url_resolver(url):
161
+ if ".html" not in url:
162
+ url = url.replace("../", "")
163
+ return "https://github.com/facebookresearch/pytorch3d/blob/main/" + url
164
+ else:
165
+ if DEPLOY:
166
+ return "http://pytorch3d.readthedocs.io/" + url
167
+ else:
168
+ return "/" + url
169
+
170
+
171
+ def setup(app):
172
+ # Add symlink to root README
173
+ if DEPLOY:
174
+ import subprocess
175
+
176
+ subprocess.call(["ln", "-s", "../README.md", "overview.md"])
177
+
178
+ from recommonmark.transform import AutoStructify
179
+
180
+ app.add_config_value(
181
+ "recommonmark_config",
182
+ {
183
+ "url_resolver": url_resolver,
184
+ "auto_toc_tree_section": "Contents",
185
+ "enable_math": True,
186
+ "enable_inline_math": True,
187
+ "enable_eval_rst": True,
188
+ "enable_auto_toc_tree": True,
189
+ },
190
+ True,
191
+ )
192
+
193
+ # Register a sphinx.ext.autodoc.between listener to ignore everything
194
+ # between lines that contain the word IGNORE
195
+ app.connect("autodoc-process-docstring", between("^.*IGNORE.*$", exclude=True))
196
+ app.add_transform(AutoStructify)
197
+
198
+ return app
pytorch3d-0.7.8/docs/examples/pulsar_basic.py ADDED
@@ -0,0 +1,76 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
3
+ # All rights reserved.
4
+ #
5
+ # This source code is licensed under the BSD-style license found in the
6
+ # LICENSE file in the root directory of this source tree.
7
+
8
+ """
9
+ This example demonstrates the most trivial, direct interface of the pulsar
10
+ sphere renderer. It renders and saves an image with 10 random spheres.
11
+ Output: basic.png.
12
+ """
13
+ import logging
14
+ import math
15
+ from os import path
16
+
17
+ import imageio
18
+ import torch
19
+ from pytorch3d.renderer.points.pulsar import Renderer
20
+
21
+
22
+ LOGGER = logging.getLogger(__name__)
23
+
24
+
25
+ def cli():
26
+ """
27
+ Basic example for the pulsar sphere renderer.
28
+
29
+ Writes to `basic.png`.
30
+ """
31
+ LOGGER.info("Rendering on GPU...")
32
+ torch.manual_seed(1)
33
+ n_points = 10
34
+ width = 1_000
35
+ height = 1_000
36
+ device = torch.device("cuda")
37
+ # The PyTorch3D system is right handed; in pulsar you can choose the handedness.
38
+ # For easy reproducibility we use a right handed coordinate system here.
39
+ renderer = Renderer(width, height, n_points, right_handed_system=True).to(device)
40
+ # Generate sample data.
41
+ vert_pos = torch.rand(n_points, 3, dtype=torch.float32, device=device) * 10.0
42
+ vert_pos[:, 2] += 25.0
43
+ vert_pos[:, :2] -= 5.0
44
+ vert_col = torch.rand(n_points, 3, dtype=torch.float32, device=device)
45
+ vert_rad = torch.rand(n_points, dtype=torch.float32, device=device)
46
+ cam_params = torch.tensor(
47
+ [
48
+ 0.0,
49
+ 0.0,
50
+ 0.0, # Position 0, 0, 0 (x, y, z).
51
+ 0.0,
52
+ math.pi, # Because of the right handed system, the camera must look 'back'.
53
+ 0.0, # Rotation 0, 0, 0 (in axis-angle format).
54
+ 5.0, # Focal length in world size.
55
+ 2.0, # Sensor size in world size.
56
+ ],
57
+ dtype=torch.float32,
58
+ device=device,
59
+ )
60
+ # Render.
61
+ image = renderer(
62
+ vert_pos,
63
+ vert_col,
64
+ vert_rad,
65
+ cam_params,
66
+ 1.0e-1, # Renderer blending parameter gamma, in [1., 1e-5].
67
+ 45.0, # Maximum depth.
68
+ )
69
+ LOGGER.info("Writing image to `%s`.", path.abspath("basic.png"))
70
+ imageio.imsave("basic.png", (image.cpu().detach() * 255.0).to(torch.uint8).numpy())
71
+ LOGGER.info("Done.")
72
+
73
+
74
+ if __name__ == "__main__":
75
+ logging.basicConfig(level=logging.INFO)
76
+ cli()
pytorch3d-0.7.8/docs/examples/pulsar_basic_unified.py ADDED
@@ -0,0 +1,89 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
3
+ # All rights reserved.
4
+ #
5
+ # This source code is licensed under the BSD-style license found in the
6
+ # LICENSE file in the root directory of this source tree.
7
+
8
+ """
9
+ This example demonstrates the most trivial use of the pulsar PyTorch3D
10
+ interface for sphere renderering. It renders and saves an image with
11
+ 10 random spheres.
12
+ Output: basic-pt3d.png.
13
+ """
14
+ import logging
15
+ from os import path
16
+
17
+ import imageio
18
+ import torch
19
+ from pytorch3d.renderer import (
20
+ PerspectiveCameras,
21
+ PointsRasterizationSettings,
22
+ PointsRasterizer,
23
+ PulsarPointsRenderer,
24
+ )
25
+ from pytorch3d.structures import Pointclouds
26
+
27
+
28
+ LOGGER = logging.getLogger(__name__)
29
+
30
+
31
def cli():
    """
    Basic example for the pulsar sphere renderer using the PyTorch3D interface.

    Writes to `basic-pt3d.png`.
    """
    LOGGER.info("Rendering on GPU...")
    torch.manual_seed(1)
    num_balls = 10
    img_width = 1_000
    img_height = 1_000
    gpu = torch.device("cuda")
    # Generate sample data. The `torch.rand` draws happen in this exact order
    # so the seeded RNG stream matches the plain pulsar example.
    positions = torch.rand(num_balls, 3, dtype=torch.float32, device=gpu) * 10.0
    positions[:, 2] += 25.0  # push the spheres in front of the camera
    positions[:, :2] -= 5.0  # center them around the optical axis
    colors = torch.rand(num_balls, 3, dtype=torch.float32, device=gpu)
    point_cloud = Pointclouds(points=positions[None, ...], features=colors[None, ...])
    # Alternatively, you can also use the look_at_view_transform to get R and T:
    # R, T = look_at_view_transform(
    #     dist=30.0, elev=0.0, azim=180.0, at=((0.0, 0.0, 30.0),), up=((0, 1, 0),),
    # )
    cameras = PerspectiveCameras(
        # The focal length must be double the size for PyTorch3D because of the NDC
        # coordinates spanning a range of two - and they must be normalized by the
        # sensor width (see the pulsar example). This means we need here
        # 5.0 * 2.0 / 2.0 to get the equivalent results as in pulsar.
        focal_length=(5.0 * 2.0 / 2.0,),
        R=torch.eye(3, dtype=torch.float32, device=gpu)[None, ...],
        T=torch.zeros((1, 3), dtype=torch.float32, device=gpu),
        image_size=((img_height, img_width),),
        device=gpu,
    )
    radii = torch.rand(num_balls, dtype=torch.float32, device=gpu)
    raster_settings = PointsRasterizationSettings(
        image_size=(img_height, img_width),
        radius=radii,
    )
    renderer = PulsarPointsRenderer(
        rasterizer=PointsRasterizer(cameras=cameras, raster_settings=raster_settings)
    ).to(gpu)
    # Render.
    rendering = renderer(
        point_cloud,
        gamma=(1.0e-1,),  # Renderer blending parameter gamma, in [1., 1e-5].
        znear=(1.0,),
        zfar=(45.0,),
        radius_world=True,
        bg_col=torch.ones((3,), dtype=torch.float32, device=gpu),
    )[0]
    LOGGER.info("Writing image to `%s`.", path.abspath("basic-pt3d.png"))
    imageio.imsave(
        "basic-pt3d.png", (rendering.cpu().detach() * 255.0).to(torch.uint8).numpy()
    )
    LOGGER.info("Done.")


if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    cli()
pytorch3d-0.7.8/docs/examples/pulsar_cam.py ADDED
@@ -0,0 +1,180 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
3
+ # All rights reserved.
4
+ #
5
+ # This source code is licensed under the BSD-style license found in the
6
+ # LICENSE file in the root directory of this source tree.
7
+
8
+ """
9
+ This example demonstrates camera parameter optimization with the plain
10
+ pulsar interface. For this, a reference image has been pre-generated
11
+ (you can find it at `../../tests/pulsar/reference/examples_TestRenderer_test_cam.png`).
12
+ The same scene parameterization is loaded and the camera parameters
13
+ distorted. Gradient-based optimization is used to converge towards the
14
+ original camera parameters.
15
+ Output: cam.gif.
16
+ """
17
+ import logging
18
+ import math
19
+ from os import path
20
+
21
+ import cv2
22
+ import imageio
23
+ import numpy as np
24
+ import torch
25
+ from pytorch3d.renderer.points.pulsar import Renderer
26
+ from pytorch3d.transforms import axis_angle_to_matrix, matrix_to_rotation_6d
27
+ from torch import nn, optim
28
+
29
+
30
+ LOGGER = logging.getLogger(__name__)
31
+ N_POINTS = 20
32
+ WIDTH = 1_000
33
+ HEIGHT = 1_000
34
+ DEVICE = torch.device("cuda")
35
+
36
+
37
class SceneModel(nn.Module):
    """
    A simple scene model to demonstrate use of pulsar in PyTorch modules.

    The scene model is parameterized with sphere locations (vert_pos),
    channel content (vert_col), radiuses (vert_rad), camera position (cam_pos),
    camera rotation (cam_rot) and sensor focal length and width (cam_sensor).

    The forward method of the model renders this scene description. Any
    of these parameters could instead be passed as inputs to the forward
    method and come from a different model.
    """

    def __init__(self) -> None:
        super(SceneModel, self).__init__()
        # Blending parameter gamma passed to the pulsar renderer.
        self.gamma = 0.1
        # Points.
        # Fixed seed so the sphere configuration matches the pre-rendered
        # reference image that the optimization script compares against.
        torch.manual_seed(1)
        vert_pos = torch.rand(N_POINTS, 3, dtype=torch.float32) * 10.0
        vert_pos[:, 2] += 25.0  # move spheres in front of the camera
        vert_pos[:, :2] -= 5.0  # center them around the optical axis
        # Scene geometry and appearance are frozen (requires_grad=False);
        # only the camera parameters below are optimized.
        self.register_parameter("vert_pos", nn.Parameter(vert_pos, requires_grad=False))
        self.register_parameter(
            "vert_col",
            nn.Parameter(
                torch.rand(N_POINTS, 3, dtype=torch.float32), requires_grad=False
            ),
        )
        self.register_parameter(
            "vert_rad",
            nn.Parameter(
                torch.rand(N_POINTS, dtype=torch.float32), requires_grad=False
            ),
        )
        # Camera parameters start slightly perturbed from the values the
        # reference was rendered with, so the optimizer has work to do.
        self.register_parameter(
            "cam_pos",
            nn.Parameter(
                torch.tensor([0.1, 0.1, 0.0], dtype=torch.float32), requires_grad=True
            ),
        )
        self.register_parameter(
            "cam_rot",
            # We're using the 6D rot. representation for better gradients.
            nn.Parameter(
                matrix_to_rotation_6d(
                    axis_angle_to_matrix(
                        torch.tensor(
                            [
                                [0.02, math.pi + 0.02, 0.01],
                            ],
                            dtype=torch.float32,
                        )
                    )
                )[0],
                requires_grad=True,
            ),
        )
        # [focal length, sensor width] in world units.
        self.register_parameter(
            "cam_sensor",
            nn.Parameter(
                torch.tensor([4.8, 1.8], dtype=torch.float32), requires_grad=True
            ),
        )
        self.renderer = Renderer(WIDTH, HEIGHT, N_POINTS, right_handed_system=True)

    def forward(self):
        # The camera vector is [pos(3), rot(6D), focal, sensor_width];
        # 45.0 is the maximum depth passed to the renderer.
        return self.renderer.forward(
            self.vert_pos,
            self.vert_col,
            self.vert_rad,
            torch.cat([self.cam_pos, self.cam_rot, self.cam_sensor]),
            self.gamma,
            45.0,
        )
111
+
112
+
113
def cli():
    """
    Camera optimization example using pulsar.

    Writes to `cam.gif`.
    """
    LOGGER.info("Loading reference...")
    # Load reference.
    # NOTE(review): the reference is flipped horizontally ([:, ::-1, :]) —
    # presumably to match pulsar's image coordinate convention; confirm
    # against the reference-generation test.
    ref = (
        torch.from_numpy(
            imageio.imread(
                "../../tests/pulsar/reference/examples_TestRenderer_test_cam.png"
            )[:, ::-1, :].copy()
        ).to(torch.float32)
        / 255.0
    ).to(DEVICE)
    # Set up model.
    model = SceneModel().to(DEVICE)
    # Optimizer.
    # Per-group learning rates: rotation is updated much more slowly than
    # position and sensor parameters.
    optimizer = optim.SGD(
        [
            {"params": [model.cam_pos], "lr": 1e-4},  # 1e-3
            {"params": [model.cam_rot], "lr": 5e-6},
            {"params": [model.cam_sensor], "lr": 1e-4},
        ]
    )

    LOGGER.info("Writing video to `%s`.", path.abspath("cam.gif"))
    writer = imageio.get_writer("cam.gif", format="gif", fps=25)

    # Optimize.
    for i in range(300):
        optimizer.zero_grad()
        result = model()
        # Visualize.
        result_im = (result.cpu().detach().numpy() * 255).astype(np.uint8)
        cv2.imshow("opt", result_im[:, :, ::-1])  # OpenCV windows expect BGR.
        writer.append_data(result_im)
        # 50/50 blend of the current rendering and the reference for a
        # progress overlay window.
        overlay_img = np.ascontiguousarray(
            ((result * 0.5 + ref * 0.5).cpu().detach().numpy() * 255).astype(np.uint8)[
                :, :, ::-1
            ]
        )
        overlay_img = cv2.putText(
            overlay_img,
            "Step %d" % (i),
            (10, 40),
            cv2.FONT_HERSHEY_SIMPLEX,
            1,
            (0, 0, 0),
            2,
            cv2.LINE_AA,
            False,
        )
        cv2.imshow("overlay", overlay_img)
        cv2.waitKey(1)
        # Update.
        # Loss is the summed per-pixel squared error against the reference.
        loss = ((result - ref) ** 2).sum()
        LOGGER.info("loss %d: %f", i, loss.item())
        loss.backward()
        optimizer.step()
    writer.close()
    LOGGER.info("Done.")


if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    cli()
pytorch3d-0.7.8/docs/examples/pulsar_cam_unified.py ADDED
@@ -0,0 +1,230 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
3
+ # All rights reserved.
4
+ #
5
+ # This source code is licensed under the BSD-style license found in the
6
+ # LICENSE file in the root directory of this source tree.
7
+
8
+ """
9
+ This example demonstrates camera parameter optimization with the pulsar
10
+ PyTorch3D interface. For this, a reference image has been pre-generated
11
+ (you can find it at `../../tests/pulsar/reference/examples_TestRenderer_test_cam.png`).
12
+ The same scene parameterization is loaded and the camera parameters
13
+ distorted. Gradient-based optimization is used to converge towards the
14
+ original camera parameters.
15
+ Output: cam-pt3d.gif
16
+ """
17
+ import logging
18
+ from os import path
19
+
20
+ import cv2
21
+ import imageio
22
+ import numpy as np
23
+ import torch
24
+ from pytorch3d.renderer.cameras import PerspectiveCameras
25
+ from pytorch3d.renderer.points import (
26
+ PointsRasterizationSettings,
27
+ PointsRasterizer,
28
+ PulsarPointsRenderer,
29
+ )
30
+ from pytorch3d.structures.pointclouds import Pointclouds
31
+ from pytorch3d.transforms import axis_angle_to_matrix
32
+ from torch import nn, optim
33
+
34
+
35
+ LOGGER = logging.getLogger(__name__)
36
+ N_POINTS = 20
37
+ WIDTH = 1_000
38
+ HEIGHT = 1_000
39
+ DEVICE = torch.device("cuda")
40
+
41
+
42
class SceneModel(nn.Module):
    """
    A simple scene model to demonstrate use of pulsar in PyTorch modules.

    The scene model is parameterized with sphere locations (vert_pos),
    channel content (vert_col), radiuses (vert_rad), camera position (cam_pos),
    camera rotation (cam_rot) and sensor focal length and width (cam_sensor).

    The forward method of the model renders this scene description. Any
    of these parameters could instead be passed as inputs to the forward
    method and come from a different model.
    """

    def __init__(self) -> None:
        super(SceneModel, self).__init__()
        # Blending parameter gamma passed to the pulsar renderer.
        self.gamma = 0.1
        # Points.
        # Fixed seed so the scene matches the pre-rendered reference image.
        torch.manual_seed(1)
        vert_pos = torch.rand(N_POINTS, 3, dtype=torch.float32) * 10.0
        vert_pos[:, 2] += 25.0  # move spheres in front of the camera
        vert_pos[:, :2] -= 5.0  # center them around the optical axis
        # Geometry/appearance is frozen; only camera parameters are optimized.
        self.register_parameter("vert_pos", nn.Parameter(vert_pos, requires_grad=False))
        self.register_parameter(
            "vert_col",
            nn.Parameter(
                torch.rand(N_POINTS, 3, dtype=torch.float32),
                requires_grad=False,
            ),
        )
        self.register_parameter(
            "vert_rad",
            nn.Parameter(
                torch.rand(N_POINTS, dtype=torch.float32),
                requires_grad=False,
            ),
        )
        self.register_parameter(
            "cam_pos",
            nn.Parameter(
                torch.tensor([0.1, 0.1, 0.0], dtype=torch.float32),
                requires_grad=True,
            ),
        )
        self.register_parameter(
            "cam_rot",
            # Unlike the plain pulsar example (which uses the 6D rotation
            # representation), this version stores the full 3x3 rotation
            # matrix that PerspectiveCameras expects as `R`.
            nn.Parameter(
                axis_angle_to_matrix(
                    torch.tensor(
                        [
                            [0.02, 0.02, 0.01],
                        ],
                        dtype=torch.float32,
                    )
                )[0],
                requires_grad=True,
            ),
        )
        self.register_parameter(
            "focal_length",
            nn.Parameter(
                torch.tensor(
                    [
                        4.8 * 2.0 / 2.0,
                    ],
                    dtype=torch.float32,
                ),
                requires_grad=True,
            ),
        )
        self.cameras = PerspectiveCameras(
            # The focal length must be double the size for PyTorch3D because of the NDC
            # coordinates spanning a range of two - and they must be normalized by the
            # sensor width (see the pulsar example). This means we need here
            # 5.0 * 2.0 / 2.0 to get the equivalent results as in pulsar.
            #
            # R, T and f are provided here, but will be provided again
            # at every call to the forward method. The reason are problems
            # with PyTorch which makes device placement for gradients problematic
            # for tensors which are themselves on a 'gradient path' but not
            # leafs in the calculation tree. This will be addressed by an architectural
            # change in PyTorch3D in the future. Until then, this workaround is
            # recommended.
            focal_length=self.focal_length,
            R=self.cam_rot[None, ...],
            T=self.cam_pos[None, ...],
            image_size=((HEIGHT, WIDTH),),
            device=DEVICE,
        )
        raster_settings = PointsRasterizationSettings(
            image_size=(HEIGHT, WIDTH),
            radius=self.vert_rad,
        )
        rasterizer = PointsRasterizer(
            cameras=self.cameras, raster_settings=raster_settings
        )
        self.renderer = PulsarPointsRenderer(rasterizer=rasterizer)

    def forward(self):
        # The Pointclouds object creates copies of its arguments - that's why
        # we have to create a new object in every forward step.
        pcl = Pointclouds(
            points=self.vert_pos[None, ...], features=self.vert_col[None, ...]
        )
        return self.renderer(
            pcl,
            gamma=(self.gamma,),
            zfar=(45.0,),
            znear=(1.0,),
            radius_world=True,
            bg_col=torch.ones((3,), dtype=torch.float32, device=DEVICE),
            # As mentioned above: workaround for device placement of gradients for
            # camera parameters.
            focal_length=self.focal_length,
            R=self.cam_rot[None, ...],
            T=self.cam_pos[None, ...],
        )[0]
159
+
160
+
161
def cli():
    """
    Camera optimization example using pulsar and the PyTorch3D interface.

    Writes to `cam-pt3d.gif`.
    """
    # Fix: the docstring previously claimed `cam.gif`, but this script writes
    # `cam-pt3d.gif` (see the writer below).
    LOGGER.info("Loading reference...")
    # Load reference.
    # NOTE(review): the reference is flipped horizontally ([:, ::-1, :]) —
    # presumably to match pulsar's image coordinate convention; confirm
    # against the reference-generation test.
    ref = (
        torch.from_numpy(
            imageio.imread(
                "../../tests/pulsar/reference/examples_TestRenderer_test_cam.png"
            )[:, ::-1, :].copy()
        ).to(torch.float32)
        / 255.0
    ).to(DEVICE)
    # Set up model.
    model = SceneModel().to(DEVICE)
    # Optimizer.
    optimizer = optim.SGD(
        [
            {"params": [model.cam_pos], "lr": 1e-4},
            {"params": [model.cam_rot], "lr": 5e-6},
            # Using a higher lr for the focal length here, because
            # the sensor width can not be optimized directly.
            {"params": [model.focal_length], "lr": 1e-3},
        ]
    )

    LOGGER.info("Writing video to `%s`.", path.abspath("cam-pt3d.gif"))
    writer = imageio.get_writer("cam-pt3d.gif", format="gif", fps=25)

    # Optimize.
    for i in range(300):
        optimizer.zero_grad()
        result = model()
        # Visualize.
        result_im = (result.cpu().detach().numpy() * 255).astype(np.uint8)
        cv2.imshow("opt", result_im[:, :, ::-1])  # OpenCV windows expect BGR.
        writer.append_data(result_im)
        # 50/50 blend of the current rendering and the reference as a
        # progress overlay.
        overlay_img = np.ascontiguousarray(
            ((result * 0.5 + ref * 0.5).cpu().detach().numpy() * 255).astype(np.uint8)[
                :, :, ::-1
            ]
        )
        overlay_img = cv2.putText(
            overlay_img,
            "Step %d" % (i),
            (10, 40),
            cv2.FONT_HERSHEY_SIMPLEX,
            1,
            (0, 0, 0),
            2,
            cv2.LINE_AA,
            False,
        )
        cv2.imshow("overlay", overlay_img)
        cv2.waitKey(1)
        # Update.
        # Loss is the summed per-pixel squared error against the reference.
        loss = ((result - ref) ** 2).sum()
        LOGGER.info("loss %d: %f", i, loss.item())
        loss.backward()
        optimizer.step()
    writer.close()
    LOGGER.info("Done.")


if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    cli()
pytorch3d-0.7.8/docs/examples/pulsar_multiview.py ADDED
@@ -0,0 +1,230 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
3
+ # All rights reserved.
4
+ #
5
+ # This source code is licensed under the BSD-style license found in the
6
+ # LICENSE file in the root directory of this source tree.
7
+
8
+ """
9
+ This example demonstrates multiview 3D reconstruction using the plain
10
+ pulsar interface. For this, reference images have been pre-generated
11
+ (you can find them at
12
+ `../../tests/pulsar/reference/examples_TestRenderer_test_multiview_%d.png`).
13
+ The camera parameters are assumed given. The scene is initialized with
14
+ random spheres. Gradient-based optimization is used to optimize sphere
15
+ parameters and prune spheres to converge to a 3D representation.
16
+
17
+ This example is not available yet through the 'unified' interface,
18
+ because opacity support has not landed in PyTorch3D for general data
19
+ structures yet.
20
+ """
21
+ import logging
22
+ import math
23
+ from os import path
24
+
25
+ import cv2
26
+ import imageio
27
+ import numpy as np
28
+ import torch
29
+ from pytorch3d.renderer.points.pulsar import Renderer
30
+ from torch import nn, optim
31
+
32
+
33
+ LOGGER = logging.getLogger(__name__)
34
+ N_POINTS = 400_000
35
+ WIDTH = 1_000
36
+ HEIGHT = 1_000
37
+ VISUALIZE_IDS = [0, 1]
38
+ DEVICE = torch.device("cuda")
39
+
40
+
41
class SceneModel(nn.Module):
    """
    A simple scene model to demonstrate use of pulsar in PyTorch modules.

    The scene model is parameterized with sphere locations (vert_pos),
    channel content (vert_col), radiuses (vert_rad), camera position (cam_pos),
    camera rotation (cam_rot) and sensor focal length and width (cam_sensor).

    The forward method of the model renders this scene description. Any
    of these parameters could instead be passed as inputs to the forward
    method and come from a different model. Optionally, camera parameters can
    be provided to the forward method in which case the scene is rendered
    using those parameters.
    """

    def __init__(self) -> None:
        super(SceneModel, self).__init__()
        # Blending parameter gamma passed to the pulsar renderer.
        self.gamma = 1.0
        # Points.
        # Fixed seed for a reproducible random initialization; all sphere
        # tensors carry a leading batch dimension of 1 and are expanded to
        # the number of views in `forward`.
        torch.manual_seed(1)
        vert_pos = torch.rand((1, N_POINTS, 3), dtype=torch.float32) * 10.0
        vert_pos[:, :, 2] += 25.0  # move spheres in front of the cameras
        vert_pos[:, :, :2] -= 5.0  # center them around the optical axis
        self.register_parameter("vert_pos", nn.Parameter(vert_pos, requires_grad=True))
        self.register_parameter(
            "vert_col",
            nn.Parameter(
                torch.ones(1, N_POINTS, 3, dtype=torch.float32) * 0.5,
                requires_grad=True,
            ),
        )
        self.register_parameter(
            "vert_rad",
            nn.Parameter(
                torch.ones(1, N_POINTS, dtype=torch.float32) * 0.05, requires_grad=True
            ),
        )
        # NOTE(review): `vert_opy` (per-sphere opacity) is registered here but
        # is not passed to the renderer in `forward` below.
        self.register_parameter(
            "vert_opy",
            nn.Parameter(
                torch.ones(1, N_POINTS, dtype=torch.float32), requires_grad=True
            ),
        )
        # Eight fixed camera poses on an arc around the scene; each row is
        # [x, y, z, axis-angle rotation (3), focal length, sensor width].
        self.register_buffer(
            "cam_params",
            torch.tensor(
                [
                    [
                        np.sin(angle) * 35.0,
                        0.0,
                        30.0 - np.cos(angle) * 35.0,
                        0.0,
                        -angle + math.pi,
                        0.0,
                        5.0,
                        2.0,
                    ]
                    for angle in [-1.5, -0.8, -0.4, -0.1, 0.1, 0.4, 0.8, 1.5]
                ],
                dtype=torch.float32,
            ),
        )
        self.renderer = Renderer(WIDTH, HEIGHT, N_POINTS, right_handed_system=True)

    def forward(self, cam=None):
        # If no camera parameters are given, render all eight stored views;
        # otherwise render a single view with the provided parameters.
        if cam is None:
            cam = self.cam_params
            n_views = 8
        else:
            n_views = 1
        return self.renderer.forward(
            self.vert_pos.expand(n_views, -1, -1),
            self.vert_col.expand(n_views, -1, -1),
            self.vert_rad.expand(n_views, -1),
            cam,
            self.gamma,
            45.0,
        )
119
+
120
+
121
def cli():
    """
    Simple demonstration for a multi-view 3D reconstruction using pulsar.

    This example makes use of opacity, which is not yet supported through
    the unified PyTorch3D interface.

    Writes to `multiview.gif`.
    """
    LOGGER.info("Loading reference...")
    # Load reference.
    # Eight pre-rendered views, stacked into one (8, H, W, 3) tensor.
    ref = torch.stack(
        [
            torch.from_numpy(
                imageio.imread(
                    "../../tests/pulsar/reference/examples_TestRenderer_test_multiview_%d.png"
                    % idx
                )
            ).to(torch.float32)
            / 255.0
            for idx in range(8)
        ]
    ).to(DEVICE)
    # Set up model.
    model = SceneModel().to(DEVICE)
    # Optimizer.
    optimizer = optim.SGD(
        [
            {"params": [model.vert_col], "lr": 1e-1},
            {"params": [model.vert_rad], "lr": 1e-3},
            {"params": [model.vert_pos], "lr": 1e-3},
        ]
    )

    # For visualization.
    angle = 0.0
    # Fix: the log message used to point at `multiview.avi`, but the writer
    # below creates `multiview.gif` — log the actual output path.
    LOGGER.info("Writing video to `%s`.", path.abspath("multiview.gif"))
    writer = imageio.get_writer("multiview.gif", format="gif", fps=25)

    # Optimize.
    for i in range(300):
        optimizer.zero_grad()
        result = model()
        # Visualize.
        result_im = (result.cpu().detach().numpy() * 255).astype(np.uint8)
        cv2.imshow("opt", result_im[0, :, :, ::-1])  # OpenCV windows expect BGR.
        # 50/50 blend of the first rendered view and its reference.
        overlay_img = np.ascontiguousarray(
            ((result * 0.5 + ref * 0.5).cpu().detach().numpy() * 255).astype(np.uint8)[
                0, :, :, ::-1
            ]
        )
        overlay_img = cv2.putText(
            overlay_img,
            "Step %d" % (i),
            (10, 40),
            cv2.FONT_HERSHEY_SIMPLEX,
            1,
            (0, 0, 0),
            2,
            cv2.LINE_AA,
            False,
        )
        cv2.imshow("overlay", overlay_img)
        cv2.waitKey(1)
        # Update.
        # Loss is the summed per-pixel squared error over all eight views.
        loss = ((result - ref) ** 2).sum()
        LOGGER.info("loss %d: %f", i, loss.item())
        loss.backward()
        optimizer.step()
        # Cleanup.
        with torch.no_grad():
            model.vert_col.data = torch.clamp(model.vert_col.data, 0.0, 1.0)
            # Remove points.
            # Spheres that shrank below the radius threshold are pushed far
            # away and given a tiny radius instead of being deleted, so the
            # tensor shapes stay fixed.
            model.vert_pos.data[model.vert_rad < 0.001, :] = -1000.0
            model.vert_rad.data[model.vert_rad < 0.001] = 0.0001
            # Also discard near-white spheres (background colored).
            vd = (
                (model.vert_col - torch.ones(1, 1, 3, dtype=torch.float32).to(DEVICE))
                .abs()
                .sum(dim=2)
            )
            model.vert_pos.data[vd <= 0.2] = -1000.0
        # Rotating visualization.
        cam_control = torch.tensor(
            [
                [
                    np.sin(angle) * 35.0,
                    0.0,
                    30.0 - np.cos(angle) * 35.0,
                    0.0,
                    -angle + math.pi,
                    0.0,
                    5.0,
                    2.0,
                ]
            ],
            dtype=torch.float32,
        ).to(DEVICE)
        with torch.no_grad():
            result = model.forward(cam=cam_control)[0]
            result_im = (result.cpu().detach().numpy() * 255).astype(np.uint8)
            cv2.imshow("vis", result_im[:, :, ::-1])
            writer.append_data(result_im)
            angle += 0.05
    writer.close()
    LOGGER.info("Done.")


if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    cli()
pytorch3d-0.7.8/docs/examples/pulsar_optimization.py ADDED
@@ -0,0 +1,164 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
3
+ # All rights reserved.
4
+ #
5
+ # This source code is licensed under the BSD-style license found in the
6
+ # LICENSE file in the root directory of this source tree.
7
+
8
+ """
9
+ This example demonstrates scene optimization with the plain
10
+ pulsar interface. For this, a reference image has been pre-generated
11
+ (you can find it at `../../tests/pulsar/reference/examples_TestRenderer_test_smallopt.png`).
12
+ The scene is initialized with random spheres. Gradient-based
13
+ optimization is used to converge towards a faithful
14
+ scene representation.
15
+ """
16
+ import logging
17
+ import math
18
+
19
+ import cv2
20
+ import imageio
21
+ import numpy as np
22
+ import torch
23
+ from pytorch3d.renderer.points.pulsar import Renderer
24
+ from torch import nn, optim
25
+
26
+
27
+ LOGGER = logging.getLogger(__name__)
28
+ N_POINTS = 10_000
29
+ WIDTH = 1_000
30
+ HEIGHT = 1_000
31
+ DEVICE = torch.device("cuda")
32
+
33
+
34
class SceneModel(nn.Module):
    """
    A simple scene model to demonstrate use of pulsar in PyTorch modules.

    The scene model is parameterized with sphere locations (vert_pos),
    channel content (vert_col), radiuses (vert_rad), camera position (cam_pos),
    camera rotation (cam_rot) and sensor focal length and width (cam_sensor).

    The forward method of the model renders this scene description. Any
    of these parameters could instead be passed as inputs to the forward
    method and come from a different model.
    """

    def __init__(self) -> None:
        super(SceneModel, self).__init__()
        # Blending parameter gamma passed to the pulsar renderer.
        self.gamma = 1.0
        # Points.
        # Fixed seed for a reproducible random initialization; all sphere
        # attributes are optimized here, the camera is a fixed buffer.
        torch.manual_seed(1)
        vert_pos = torch.rand(N_POINTS, 3, dtype=torch.float32) * 10.0
        vert_pos[:, 2] += 25.0  # move spheres in front of the camera
        vert_pos[:, :2] -= 5.0  # center them around the optical axis
        self.register_parameter("vert_pos", nn.Parameter(vert_pos, requires_grad=True))
        self.register_parameter(
            "vert_col",
            nn.Parameter(
                torch.ones(N_POINTS, 3, dtype=torch.float32) * 0.5, requires_grad=True
            ),
        )
        self.register_parameter(
            "vert_rad",
            nn.Parameter(
                torch.ones(N_POINTS, dtype=torch.float32) * 0.3, requires_grad=True
            ),
        )
        # Fixed camera: [x, y, z, axis-angle rotation (3), focal, sensor width].
        self.register_buffer(
            "cam_params",
            torch.tensor(
                [0.0, 0.0, 0.0, 0.0, math.pi, 0.0, 5.0, 2.0], dtype=torch.float32
            ),
        )
        # The volumetric optimization works better with a higher number of tracked
        # intersections per ray.
        self.renderer = Renderer(
            WIDTH, HEIGHT, N_POINTS, n_track=32, right_handed_system=True
        )

    def forward(self):
        # Returns (image, forward_info) because of return_forward_info=True.
        return self.renderer.forward(
            self.vert_pos,
            self.vert_col,
            self.vert_rad,
            self.cam_params,
            self.gamma,
            45.0,
            return_forward_info=True,
        )
90
+
91
+
92
def cli():
    """
    Scene optimization example using pulsar.
    """
    LOGGER.info("Loading reference...")
    # Load reference.
    # NOTE(review): the reference is flipped horizontally ([:, ::-1, :]) —
    # presumably to match pulsar's image coordinate convention; confirm
    # against the reference-generation test.
    ref = (
        torch.from_numpy(
            imageio.imread(
                "../../tests/pulsar/reference/examples_TestRenderer_test_smallopt.png"
            )[:, ::-1, :].copy()
        ).to(torch.float32)
        / 255.0
    ).to(DEVICE)
    # Set up model.
    model = SceneModel().to(DEVICE)
    # Optimizer.
    # Per-group learning rates for color, radius and position.
    optimizer = optim.SGD(
        [
            {"params": [model.vert_col], "lr": 1e0},
            {"params": [model.vert_rad], "lr": 5e-3},
            {"params": [model.vert_pos], "lr": 1e-2},
        ]
    )
    LOGGER.info("Optimizing...")
    # Optimize.
    for i in range(500):
        optimizer.zero_grad()
        # `result_info` is the pulsar forward info; it is not used here.
        result, result_info = model()
        # Visualize.
        result_im = (result.cpu().detach().numpy() * 255).astype(np.uint8)
        cv2.imshow("opt", result_im[:, :, ::-1])  # OpenCV windows expect BGR.
        # 50/50 blend of the current rendering and the reference.
        overlay_img = np.ascontiguousarray(
            ((result * 0.5 + ref * 0.5).cpu().detach().numpy() * 255).astype(np.uint8)[
                :, :, ::-1
            ]
        )
        overlay_img = cv2.putText(
            overlay_img,
            "Step %d" % (i),
            (10, 40),
            cv2.FONT_HERSHEY_SIMPLEX,
            1,
            (0, 0, 0),
            2,
            cv2.LINE_AA,
            False,
        )
        cv2.imshow("overlay", overlay_img)
        cv2.waitKey(1)
        # Update.
        # Loss is the summed per-pixel squared error against the reference.
        loss = ((result - ref) ** 2).sum()
        LOGGER.info("loss %d: %f", i, loss.item())
        loss.backward()
        optimizer.step()
        # Cleanup.
        with torch.no_grad():
            model.vert_col.data = torch.clamp(model.vert_col.data, 0.0, 1.0)
            # Remove points.
            # Tiny spheres are moved far away and given a minimal radius
            # instead of being deleted, keeping tensor shapes fixed.
            model.vert_pos.data[model.vert_rad < 0.001, :] = -1000.0
            model.vert_rad.data[model.vert_rad < 0.001] = 0.0001
            # Also discard near-white (background colored) spheres.
            vd = (
                (model.vert_col - torch.ones(3, dtype=torch.float32).to(DEVICE))
                .abs()
                .sum(dim=1)
            )
            model.vert_pos.data[vd <= 0.2] = -1000.0
    LOGGER.info("Done.")


if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    cli()
pytorch3d-0.7.8/docs/examples/pulsar_optimization_unified.py ADDED
@@ -0,0 +1,189 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
3
+ # All rights reserved.
4
+ #
5
+ # This source code is licensed under the BSD-style license found in the
6
+ # LICENSE file in the root directory of this source tree.
7
+
8
+ """
9
+ This example demonstrates scene optimization with the PyTorch3D
10
+ pulsar interface. For this, a reference image has been pre-generated
11
+ (you can find it at `../../tests/pulsar/reference/examples_TestRenderer_test_smallopt.png`).
12
+ The scene is initialized with random spheres. Gradient-based
13
+ optimization is used to converge towards a faithful
14
+ scene representation.
15
+ """
16
+ import logging
17
+ import math
18
+
19
+ import cv2
20
+ import imageio
21
+ import numpy as np
22
+ import torch
23
+ from pytorch3d.renderer.cameras import PerspectiveCameras
24
+ from pytorch3d.renderer.points import (
25
+ PointsRasterizationSettings,
26
+ PointsRasterizer,
27
+ PulsarPointsRenderer,
28
+ )
29
+ from pytorch3d.structures.pointclouds import Pointclouds
30
+ from torch import nn, optim
31
+
32
+
33
+ LOGGER = logging.getLogger(__name__)
34
+ N_POINTS = 10_000
35
+ WIDTH = 1_000
36
+ HEIGHT = 1_000
37
+ DEVICE = torch.device("cuda")
38
+
39
+
40
class SceneModel(nn.Module):
    """
    A simple scene model to demonstrate use of pulsar in PyTorch modules.

    The scene model is parameterized with sphere locations (vert_pos),
    channel content (vert_col), radiuses (vert_rad), camera position (cam_pos),
    camera rotation (cam_rot) and sensor focal length and width (cam_sensor).

    The forward method of the model renders this scene description. Any
    of these parameters could instead be passed as inputs to the forward
    method and come from a different model.
    """

    def __init__(self) -> None:
        super(SceneModel, self).__init__()
        # Blending parameter gamma passed to the pulsar renderer.
        self.gamma = 1.0
        # Points.
        # Fixed seed for a reproducible random initialization.
        torch.manual_seed(1)
        vert_pos = torch.rand(N_POINTS, 3, dtype=torch.float32, device=DEVICE) * 10.0
        vert_pos[:, 2] += 25.0  # move spheres in front of the camera
        vert_pos[:, :2] -= 5.0  # center them around the optical axis
        self.register_parameter("vert_pos", nn.Parameter(vert_pos, requires_grad=True))
        self.register_parameter(
            "vert_col",
            nn.Parameter(
                torch.ones(N_POINTS, 3, dtype=torch.float32, device=DEVICE) * 0.5,
                requires_grad=True,
            ),
        )
        # NOTE(review): unlike vert_pos/vert_col, vert_rad is created on CPU
        # here; `model.to(DEVICE)` in the driver moves it — confirm intended.
        self.register_parameter(
            "vert_rad",
            nn.Parameter(
                torch.ones(N_POINTS, dtype=torch.float32) * 0.3, requires_grad=True
            ),
        )
        # Fixed camera: [x, y, z, axis-angle rotation (3), focal, sensor width].
        self.register_buffer(
            "cam_params",
            torch.tensor(
                [0.0, 0.0, 0.0, 0.0, math.pi, 0.0, 5.0, 2.0], dtype=torch.float32
            ),
        )
        self.cameras = PerspectiveCameras(
            # The focal length must be double the size for PyTorch3D because of the NDC
            # coordinates spanning a range of two - and they must be normalized by the
            # sensor width (see the pulsar example). This means we need here
            # 5.0 * 2.0 / 2.0 to get the equivalent results as in pulsar.
            focal_length=5.0,
            R=torch.eye(3, dtype=torch.float32, device=DEVICE)[None, ...],
            T=torch.zeros((1, 3), dtype=torch.float32, device=DEVICE),
            image_size=((HEIGHT, WIDTH),),
            device=DEVICE,
        )
        raster_settings = PointsRasterizationSettings(
            image_size=(HEIGHT, WIDTH),
            radius=self.vert_rad,
        )
        rasterizer = PointsRasterizer(
            cameras=self.cameras, raster_settings=raster_settings
        )
        # Higher n_track (tracked intersections per ray) helps the
        # volumetric optimization.
        self.renderer = PulsarPointsRenderer(rasterizer=rasterizer, n_track=32)

    def forward(self):
        # The Pointclouds object creates copies of its arguments - that's why
        # we have to create a new object in every forward step.
        pcl = Pointclouds(
            points=self.vert_pos[None, ...], features=self.vert_col[None, ...]
        )
        return self.renderer(
            pcl,
            gamma=(self.gamma,),
            zfar=(45.0,),
            znear=(1.0,),
            radius_world=True,
            bg_col=torch.ones((3,), dtype=torch.float32, device=DEVICE),
        )[0]
115
+
116
+
117
def cli():
    """
    Scene optimization example using pulsar and the unified PyTorch3D interface.
    """
    LOGGER.info("Loading reference...")
    # Load reference.
    # NOTE(review): the reference is flipped horizontally ([:, ::-1, :]) —
    # presumably to match pulsar's image coordinate convention; confirm
    # against the reference-generation test.
    ref = (
        torch.from_numpy(
            imageio.imread(
                "../../tests/pulsar/reference/examples_TestRenderer_test_smallopt.png"
            )[:, ::-1, :].copy()
        ).to(torch.float32)
        / 255.0
    ).to(DEVICE)
    # Set up model.
    model = SceneModel().to(DEVICE)
    # Optimizer.
    # Per-group learning rates for color, radius and position.
    optimizer = optim.SGD(
        [
            {"params": [model.vert_col], "lr": 1e0},
            {"params": [model.vert_rad], "lr": 5e-3},
            {"params": [model.vert_pos], "lr": 1e-2},
        ]
    )
    LOGGER.info("Optimizing...")
    # Optimize.
    for i in range(500):
        optimizer.zero_grad()
        result = model()
        # Visualize.
        result_im = (result.cpu().detach().numpy() * 255).astype(np.uint8)
        cv2.imshow("opt", result_im[:, :, ::-1])  # OpenCV windows expect BGR.
        # 50/50 blend of the current rendering and the reference.
        overlay_img = np.ascontiguousarray(
            ((result * 0.5 + ref * 0.5).cpu().detach().numpy() * 255).astype(np.uint8)[
                :, :, ::-1
            ]
        )
        overlay_img = cv2.putText(
            overlay_img,
            "Step %d" % (i),
            (10, 40),
            cv2.FONT_HERSHEY_SIMPLEX,
            1,
            (0, 0, 0),
            2,
            cv2.LINE_AA,
            False,
        )
        cv2.imshow("overlay", overlay_img)
        cv2.waitKey(1)
        # Update.
        # Loss is the summed per-pixel squared error against the reference.
        loss = ((result - ref) ** 2).sum()
        LOGGER.info("loss %d: %f", i, loss.item())
        loss.backward()
        optimizer.step()
        # Cleanup.
        with torch.no_grad():
            model.vert_col.data = torch.clamp(model.vert_col.data, 0.0, 1.0)
            # Remove points.
            # Tiny spheres are moved far away and given a minimal radius
            # instead of being deleted, keeping tensor shapes fixed.
            model.vert_pos.data[model.vert_rad < 0.001, :] = -1000.0
            model.vert_rad.data[model.vert_rad < 0.001] = 0.0001
            # Also discard near-white (background colored) spheres.
            vd = (
                (model.vert_col - torch.ones(3, dtype=torch.float32).to(DEVICE))
                .abs()
                .sum(dim=1)
            )
            model.vert_pos.data[vd <= 0.2] = -1000.0
    LOGGER.info("Done.")


if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    cli()
pytorch3d-0.7.8/docs/generate_stubs.py ADDED
@@ -0,0 +1,162 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
3
+ # All rights reserved.
4
+ #
5
+ # This source code is licensed under the BSD-style license found in the
6
+ # LICENSE file in the root directory of this source tree.
7
+
8
+ """
9
+ This script makes the stubs for implicitron in docs/modules.
10
+ """
11
+
12
+ from pathlib import Path
13
+
14
+ ROOT_DIR = Path(__file__).resolve().parent.parent
15
+
16
+
17
+ def paths_to_modules(paths):
18
+ """
19
+ Given an iterable of paths, return equivalent list of modules.
20
+ """
21
+ return [
22
+ str(i.relative_to(ROOT_DIR))[:-3].replace("/", ".")
23
+ for i in paths
24
+ if "__pycache__" not in str(i)
25
+ ]
26
+
27
+
28
+ def create_one_file(title, description, sources, dest_file):
29
+ with open(dest_file, "w") as f:
30
+ print(title, file=f)
31
+ print("=" * len(title), file=f)
32
+ print(file=f)
33
+ print(description, file=f)
34
+ for source in sources:
35
+ if source.find("._") != -1:
36
+ # ignore internal modules including __init__.py
37
+ continue
38
+ print(f"\n.. automodule:: {source}", file=f)
39
+ print(" :members:", file=f)
40
+ print(" :undoc-members:", file=f)
41
+ print(" :show-inheritance:", file=f)
42
+
43
+
44
+ def iterate_directory(directory_path, dest):
45
+ """
46
+ Create a file for each module in the given path
47
+ """
48
+ toc = []
49
+ if not dest.exists():
50
+ dest.mkdir()
51
+ for file in sorted(directory_path.glob("*.py")):
52
+ if file.stem.startswith("_"):
53
+ continue
54
+ module = paths_to_modules([file])
55
+ create_one_file(module[0], file.stem, module, dest / f"{file.stem}.rst")
56
+ toc.append(file.stem)
57
+
58
+ for subdir in directory_path.iterdir():
59
+ if not subdir.is_dir():
60
+ continue
61
+ if subdir.name == "fb":
62
+ continue
63
+ if subdir.name.startswith("_"):
64
+ continue
65
+ iterate_directory(subdir, dest / (subdir.name))
66
+ toc.append(f"{subdir.name}/index")
67
+
68
+ paths_to_modules_ = paths_to_modules([directory_path.with_suffix(".XX")])
69
+ if len(paths_to_modules_) == 0:
70
+ return
71
+ title = paths_to_modules_[0]
72
+
73
+ with open(dest / "index.rst", "w") as f:
74
+ print(title, file=f)
75
+ print("=" * len(title), file=f)
76
+ print("\n.. toctree::\n", file=f)
77
+ for item in toc:
78
+ print(f" {item}", file=f)
79
+
80
+
81
+ def make_directory_index(title: str, directory_path: Path):
82
+ index_file = directory_path / "index.rst"
83
+ directory_rsts = sorted(directory_path.glob("*.rst"))
84
+ subdirs = sorted([f for f in directory_path.iterdir() if f.is_dir()])
85
+ with open(index_file, "w") as f:
86
+ print(title, file=f)
87
+ print("=" * len(title), file=f)
88
+ print("\n.. toctree::\n", file=f)
89
+ for subdir in subdirs:
90
+ print(f" {subdir.stem}/index.rst", file=f)
91
+ for rst in directory_rsts:
92
+ if rst.stem == "index":
93
+ continue
94
+ print(f" {rst.stem}", file=f)
95
+
96
+
97
+ def do_implicitron():
98
+ DEST_DIR = Path(__file__).resolve().parent / "modules/implicitron"
99
+
100
+ iterate_directory(ROOT_DIR / "pytorch3d/implicitron/models", DEST_DIR / "models")
101
+
102
+ unwanted_tools = ["configurable", "depth_cleanup", "utils"]
103
+ tools_sources = sorted(ROOT_DIR.glob("pytorch3d/implicitron/tools/*.py"))
104
+ tools_modules = [
105
+ str(i.relative_to(ROOT_DIR))[:-3].replace("/", ".")
106
+ for i in tools_sources
107
+ if i.stem not in unwanted_tools
108
+ ]
109
+ create_one_file(
110
+ "pytorch3d.implicitron.tools",
111
+ "Tools for implicitron",
112
+ tools_modules,
113
+ DEST_DIR / "tools.rst",
114
+ )
115
+
116
+ dataset_files = sorted(ROOT_DIR.glob("pytorch3d/implicitron/dataset/*.py"))
117
+ basic_dataset = [
118
+ "dataset_base",
119
+ "dataset_map_provider",
120
+ "data_loader_map_provider",
121
+ "data_source",
122
+ "scene_batch_sampler",
123
+ ]
124
+ basic_dataset_modules = [
125
+ f"pytorch3d.implicitron.dataset.{i}" for i in basic_dataset
126
+ ]
127
+ create_one_file(
128
+ "pytorch3d.implicitron.dataset in general",
129
+ "Basics of data for implicitron",
130
+ basic_dataset_modules,
131
+ DEST_DIR / "data_basics.rst",
132
+ )
133
+
134
+ specific_dataset_files = [
135
+ i for i in dataset_files if i.stem.find("_dataset_map_provider") != -1
136
+ ]
137
+ create_one_file(
138
+ "pytorch3d.implicitron.dataset specific datasets",
139
+ "specific datasets",
140
+ paths_to_modules(specific_dataset_files),
141
+ DEST_DIR / "datasets.rst",
142
+ )
143
+
144
+ evaluation_files = sorted(ROOT_DIR.glob("pytorch3d/implicitron/evaluation/*.py"))
145
+ create_one_file(
146
+ "pytorch3d.implicitron.evaluation",
147
+ "evaluation",
148
+ paths_to_modules(evaluation_files),
149
+ DEST_DIR / "evaluation.rst",
150
+ )
151
+
152
+ make_directory_index("pytorch3d.implicitron", DEST_DIR)
153
+
154
+
155
+ def iterate_toplevel_module(name: str) -> None:
156
+ dest_dir = Path(__file__).resolve().parent / "modules" / name
157
+ iterate_directory(ROOT_DIR / "pytorch3d" / name, dest_dir)
158
+
159
+
160
+ do_implicitron()
161
+ iterate_toplevel_module("renderer")
162
+ iterate_toplevel_module("vis")
pytorch3d-0.7.8/docs/index.rst ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Welcome to PyTorch3D's documentation!
2
+ =====================================
3
+
4
+ PyTorch3D is a library of reusable components for Deep Learning with 3D data.
5
+
6
+ Table of Contents
7
+ =================
8
+
9
+ .. toctree::
10
+ :maxdepth: 2
11
+
12
+ overview
13
+
14
+ .. toctree::
15
+ :maxdepth: 2
16
+
17
+ modules/index
pytorch3d-0.7.8/docs/modules/common.rst ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ pytorch3d.common
2
+ ===========================
3
+
4
+ .. automodule:: pytorch3d.common
5
+ :members:
6
+ :undoc-members:
pytorch3d-0.7.8/docs/modules/datasets.rst ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ pytorch3d.datasets
2
+ ===========================
3
+
4
+ Dataset loaders for datasets including ShapeNetCore.
5
+
6
+ .. automodule:: pytorch3d.datasets
7
+ :members:
8
+ :undoc-members:
9
+ :show-inheritance:
pytorch3d-0.7.8/docs/modules/implicitron/data_basics.rst ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ pytorch3d.implicitron.dataset in general
2
+ ========================================
3
+
4
+ Basics of data for implicitron
5
+
6
+ .. automodule:: pytorch3d.implicitron.dataset.dataset_base
7
+ :members:
8
+ :undoc-members:
9
+ :show-inheritance:
10
+
11
+ .. automodule:: pytorch3d.implicitron.dataset.dataset_map_provider
12
+ :members:
13
+ :undoc-members:
14
+ :show-inheritance:
15
+
16
+ .. automodule:: pytorch3d.implicitron.dataset.data_loader_map_provider
17
+ :members:
18
+ :undoc-members:
19
+ :show-inheritance:
20
+
21
+ .. automodule:: pytorch3d.implicitron.dataset.data_source
22
+ :members:
23
+ :undoc-members:
24
+ :show-inheritance:
25
+
26
+ .. automodule:: pytorch3d.implicitron.dataset.scene_batch_sampler
27
+ :members:
28
+ :undoc-members:
29
+ :show-inheritance:
pytorch3d-0.7.8/docs/modules/implicitron/datasets.rst ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ pytorch3d.implicitron.dataset specific datasets
2
+ ===============================================
3
+
4
+ specific datasets
5
+
6
+ .. automodule:: pytorch3d.implicitron.dataset.blender_dataset_map_provider
7
+ :members:
8
+ :undoc-members:
9
+ :show-inheritance:
10
+
11
+ .. automodule:: pytorch3d.implicitron.dataset.json_index_dataset_map_provider
12
+ :members:
13
+ :undoc-members:
14
+ :show-inheritance:
15
+
16
+ .. automodule:: pytorch3d.implicitron.dataset.json_index_dataset_map_provider_v2
17
+ :members:
18
+ :undoc-members:
19
+ :show-inheritance:
20
+
21
+ .. automodule:: pytorch3d.implicitron.dataset.llff_dataset_map_provider
22
+ :members:
23
+ :undoc-members:
24
+ :show-inheritance:
25
+
26
+ .. automodule:: pytorch3d.implicitron.dataset.rendered_mesh_dataset_map_provider
27
+ :members:
28
+ :undoc-members:
29
+ :show-inheritance:
pytorch3d-0.7.8/docs/modules/implicitron/evaluation.rst ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ pytorch3d.implicitron.evaluation
2
+ ================================
3
+
4
+ evaluation
5
+
6
+ .. automodule:: pytorch3d.implicitron.evaluation.evaluate_new_view_synthesis
7
+ :members:
8
+ :undoc-members:
9
+ :show-inheritance:
10
+
11
+ .. automodule:: pytorch3d.implicitron.evaluation.evaluator
12
+ :members:
13
+ :undoc-members:
14
+ :show-inheritance:
pytorch3d-0.7.8/docs/modules/implicitron/index.rst ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ pytorch3d.implicitron
2
+ =====================
3
+
4
+ .. toctree::
5
+
6
+ models/index.rst
7
+ data_basics
8
+ datasets
9
+ evaluation
10
+ tools
pytorch3d-0.7.8/docs/modules/implicitron/models/base_model.rst ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ pytorch3d.implicitron.models.base_model
2
+ =======================================
3
+
4
+ base_model
5
+
6
+ .. automodule:: pytorch3d.implicitron.models.base_model
7
+ :members:
8
+ :undoc-members:
9
+ :show-inheritance:
pytorch3d-0.7.8/docs/modules/implicitron/models/feature_extractor/feature_extractor.rst ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ pytorch3d.implicitron.models.feature_extractor.feature_extractor
2
+ ================================================================
3
+
4
+ feature_extractor
5
+
6
+ .. automodule:: pytorch3d.implicitron.models.feature_extractor.feature_extractor
7
+ :members:
8
+ :undoc-members:
9
+ :show-inheritance:
pytorch3d-0.7.8/docs/modules/implicitron/models/feature_extractor/index.rst ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ pytorch3d.implicitron.models.feature_extractor
2
+ ==============================================
3
+
4
+ .. toctree::
5
+
6
+ feature_extractor
7
+ resnet_feature_extractor
pytorch3d-0.7.8/docs/modules/implicitron/models/feature_extractor/resnet_feature_extractor.rst ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ pytorch3d.implicitron.models.feature_extractor.resnet_feature_extractor
2
+ =======================================================================
3
+
4
+ resnet_feature_extractor
5
+
6
+ .. automodule:: pytorch3d.implicitron.models.feature_extractor.resnet_feature_extractor
7
+ :members:
8
+ :undoc-members:
9
+ :show-inheritance:
pytorch3d-0.7.8/docs/modules/implicitron/models/generic_model.rst ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ pytorch3d.implicitron.models.generic_model
2
+ ==========================================
3
+
4
+ generic_model
5
+
6
+ .. automodule:: pytorch3d.implicitron.models.generic_model
7
+ :members:
8
+ :undoc-members:
9
+ :show-inheritance:
pytorch3d-0.7.8/docs/modules/implicitron/models/global_encoder/autodecoder.rst ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ pytorch3d.implicitron.models.global_encoder.autodecoder
2
+ =======================================================
3
+
4
+ autodecoder
5
+
6
+ .. automodule:: pytorch3d.implicitron.models.global_encoder.autodecoder
7
+ :members:
8
+ :undoc-members:
9
+ :show-inheritance:
pytorch3d-0.7.8/docs/modules/implicitron/models/global_encoder/global_encoder.rst ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ pytorch3d.implicitron.models.global_encoder.global_encoder
2
+ ==========================================================
3
+
4
+ global_encoder
5
+
6
+ .. automodule:: pytorch3d.implicitron.models.global_encoder.global_encoder
7
+ :members:
8
+ :undoc-members:
9
+ :show-inheritance:
pytorch3d-0.7.8/docs/modules/implicitron/models/global_encoder/index.rst ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ pytorch3d.implicitron.models.global_encoder
2
+ ===========================================
3
+
4
+ .. toctree::
5
+
6
+ autodecoder
7
+ global_encoder
pytorch3d-0.7.8/docs/modules/implicitron/models/implicit_function/base.rst ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ pytorch3d.implicitron.models.implicit_function.base
2
+ ===================================================
3
+
4
+ base
5
+
6
+ .. automodule:: pytorch3d.implicitron.models.implicit_function.base
7
+ :members:
8
+ :undoc-members:
9
+ :show-inheritance: