{ "metadata": { "kernelspec": { "display_name": "Python 3", "name": "python3" }, "language_info": { "name": "python", "version": "3.12.12", "mimetype": "text/x-python", "codemirror_mode": { "name": "ipython", "version": 3 }, "pygments_lexer": "ipython3", "nbconvert_exporter": "python", "file_extension": ".py" }, "kaggle": { "accelerator": "none", "dataSources": [], "dockerImageVersionId": 31259, "isInternetEnabled": true, "language": "python", "sourceType": "notebook", "isGpuEnabled": false }, "colab": { "provenance": [], "gpuType": "T4" }, "accelerator": "GPU", "widgets": { "application/vnd.jupyter.widget-state+json": { "bf44c57e4723401c955d48649116565a": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HBoxModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HBoxView", "box_style": "", "children": [ "IPY_MODEL_ef6ea005765742188b1cd55fb4530ef9", "IPY_MODEL_9167fbd8a70c4639b2f251343eb9a981", "IPY_MODEL_ccb3263efe67459c87d818292682fd3f" ], "layout": "IPY_MODEL_06d8bf25667c49aab29956b05feb0abb" } }, "ef6ea005765742188b1cd55fb4530ef9": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_e9848f5bd529467fa99163655c7f754c", "placeholder": "​", "style": "IPY_MODEL_d9e37e5619c04646b3d331b56a0e8745", "value": "preprocessor_config.json: 100%" } }, "9167fbd8a70c4639b2f251343eb9a981": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "FloatProgressModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "ProgressView", "bar_style": "success", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_e11a930ed5164a35a385a636875a16f6", "max": 436, "min": 0, "orientation": "horizontal", "style": "IPY_MODEL_eb7acaad8f554949a81496e0aad9ccd9", "value": 436 } }, "ccb3263efe67459c87d818292682fd3f": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_0f856bd88fc645b0baaa4633fd0f58bd", "placeholder": "​", "style": "IPY_MODEL_c779b806854d4a7cbd20eea5760a2efc", "value": " 436/436 [00:00<00:00, 33.6kB/s]" } }, "06d8bf25667c49aab29956b05feb0abb": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", 
"_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "e9848f5bd529467fa99163655c7f754c": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "d9e37e5619c04646b3d331b56a0e8745": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "e11a930ed5164a35a385a636875a16f6": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, 
"eb7acaad8f554949a81496e0aad9ccd9": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "ProgressStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "bar_color": null, "description_width": "" } }, "0f856bd88fc645b0baaa4633fd0f58bd": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "c779b806854d4a7cbd20eea5760a2efc": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "b2cefd6f37f74514a4fd9f449102d61b": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HBoxModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HBoxView", "box_style": "", "children": [ "IPY_MODEL_ef2421d71fbf4fb2b453d87b7b9a907c", "IPY_MODEL_a7e7034a318b4c9784d6aaebd36748c9", "IPY_MODEL_806fa9c3a1c049f1adaa96bdc3271141" ], "layout": "IPY_MODEL_2fb6f473b5234fadb9cb1d06207309a3" } }, "ef2421d71fbf4fb2b453d87b7b9a907c": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_52342bdb5b664bcbbbc1479fb8f30885", "placeholder": "​", "style": "IPY_MODEL_c0e66b36c9d6473b98d2b4f546b39ffb", "value": "config.json: 100%" } }, "a7e7034a318b4c9784d6aaebd36748c9": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", 
"_model_name": "FloatProgressModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "ProgressView", "bar_style": "success", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_f95c52cb012d4ba89580b8ed57aaa4b8", "max": 548, "min": 0, "orientation": "horizontal", "style": "IPY_MODEL_b1fc0cb4fd794ee4bcc52a173dde94f0", "value": 548 } }, "806fa9c3a1c049f1adaa96bdc3271141": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_0861cfd48ac04e6da8b267de142f8580", "placeholder": "​", "style": "IPY_MODEL_5f127d1dadd44aa28367423d1e692967", "value": " 548/548 [00:00<00:00, 50.4kB/s]" } }, "2fb6f473b5234fadb9cb1d06207309a3": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "52342bdb5b664bcbbbc1479fb8f30885": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "c0e66b36c9d6473b98d2b4f546b39ffb": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", 
"_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "f95c52cb012d4ba89580b8ed57aaa4b8": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "b1fc0cb4fd794ee4bcc52a173dde94f0": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "ProgressStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "bar_color": null, "description_width": "" } }, "0861cfd48ac04e6da8b267de142f8580": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "5f127d1dadd44aa28367423d1e692967": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "1df1dd60ae4244678430258b4929ec41": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", "model_module_version": "1.5.0", "state": { 
"_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HBoxModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HBoxView", "box_style": "", "children": [ "IPY_MODEL_9ac5ee7dc2bb40748fa9fc90139a8eeb", "IPY_MODEL_9e6232a28a474a299b7fce684cf48a36", "IPY_MODEL_870ec434cc9346ada1bbb0ddcaa62f4c" ], "layout": "IPY_MODEL_2979f087a4e549318b6024a20220d5d3" } }, "9ac5ee7dc2bb40748fa9fc90139a8eeb": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_6c5d3c940f3147cb916acc9cd6a7a5e1", "placeholder": "​", "style": "IPY_MODEL_c16b9aef215342459cbac253f9b7b87e", "value": "model.safetensors: 100%" } }, "9e6232a28a474a299b7fce684cf48a36": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "FloatProgressModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "ProgressView", "bar_style": "success", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_49ddba95962049e3b022a57bd7d49a19", "max": 346345912, "min": 0, "orientation": "horizontal", "style": "IPY_MODEL_36e9a773b2ea47799f31917188c016d7", "value": 346345912 } }, "870ec434cc9346ada1bbb0ddcaa62f4c": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_702e4c196d10471da4230ee0c3c31a28", "placeholder": "​", "style": "IPY_MODEL_8d61b0a13356489d9c279ff8b74f29ad", "value": " 346M/346M [00:11<00:00, 29.1MB/s]" } }, "2979f087a4e549318b6024a20220d5d3": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": 
null, "width": null } }, "6c5d3c940f3147cb916acc9cd6a7a5e1": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "c16b9aef215342459cbac253f9b7b87e": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "49ddba95962049e3b022a57bd7d49a19": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "36e9a773b2ea47799f31917188c016d7": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "ProgressStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "bar_color": null, "description_width": "" } }, "702e4c196d10471da4230ee0c3c31a28": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", 
"_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "8d61b0a13356489d9c279ff8b74f29ad": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } } } } }, "nbformat_minor": 0, "nbformat": 4, "cells": [ { "cell_type": "code", "source": [], "metadata": { "_uuid": "8f2839f25d086af736a60e9eeb907d3b93b6e0e5", "_cell_guid": "b1076dfc-b9ad-4769-8c92-a6c4dae69d19", "trusted": true, "execution": { "iopub.status.busy": "2026-01-22T11:23:22.240664Z", "iopub.execute_input": "2026-01-22T11:23:22.240957Z", "iopub.status.idle": "2026-01-22T11:23:22.246018Z", "shell.execute_reply.started": "2026-01-22T11:23:22.240936Z", "shell.execute_reply": "2026-01-22T11:23:22.245074Z" }, "id": "yhVNR6GETKyA" }, "outputs": [], "execution_count": 3 }, { "cell_type": "code", "source": [ "# =====================================================================\n", "# biplet-asmk-mast3r-ps2-gs-colab\n", "#\n", "# =====================================================================\n", "\n", "# =====================================================================\n", "# CELL 1: Install Dependencies\n", "# =====================================================================\n", "!pip install roma einops timm huggingface_hub\n", "!pip install opencv-python pillow tqdm pyaml cython plyfile\n", "!pip install pycolmap trimesh\n", "!pip install transformers==4.40.0 # DINOに必要\n", "!pip uninstall -y numpy scipy\n", "!pip install numpy==1.26.4 scipy==1.11.4\n", "break" ], "metadata": { "trusted": true, "id": "6C3QGJD8TKyC", "colab": { "base_uri": "https://localhost:8080/", "height": 1000 }, "outputId": "563eab81-b2f2-42fa-be0a-a3ab208a4bfb" }, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "Requirement already satisfied: roma in /usr/local/lib/python3.12/dist-packages (1.5.4)\n", "Requirement already satisfied: einops in /usr/local/lib/python3.12/dist-packages (0.8.1)\n", "Requirement already satisfied: timm in /usr/local/lib/python3.12/dist-packages (1.0.24)\n", "Requirement already satisfied: huggingface_hub in /usr/local/lib/python3.12/dist-packages (0.36.0)\n", "Requirement already satisfied: torch in /usr/local/lib/python3.12/dist-packages (from timm) (2.9.0+cu126)\n", "Requirement already satisfied: torchvision in /usr/local/lib/python3.12/dist-packages (from timm) (0.24.0+cu126)\n", "Requirement already satisfied: pyyaml in /usr/local/lib/python3.12/dist-packages (from timm) (6.0.3)\n", "Requirement already satisfied: safetensors in 
{ "cell_type": "code", "source": [ "# Restart the runtime, then run from this cell onward\n", "# =====================================================================\n", "# CELL 2: Mount Drive and Verify\n", "# =====================================================================\n", "from google.colab import drive\n", "drive.mount('/content/drive')\n", "\n", "import numpy as np\n", "print(f\"✓ np: {np.__version__} - {np.__file__}\")\n", "!pip show numpy | grep Version\n", "\n", "try:\n", "    import roma\n", "    print(\"✓ roma is installed\")\n", "except ModuleNotFoundError:\n", "    print(\"⚠️ roma not found, installing...\")\n", "    !pip install roma\n", "    import roma\n", "    print(\"✓ roma installed\")\n", "\n", "# =====================================================================\n", "# CELL 3: Clone Repositories\n", "# =====================================================================\n", "import os\n", "import sys\n", "\n", "# Clone MASt3R\n", "if not os.path.exists('/content/mast3r'):\n", "    print(\"Cloning MASt3R repository...\")\n", "    !git clone --recursive https://github.com/naver/mast3r.git /content/mast3r\n", "    print(\"✓ MASt3R cloned\")\n", "else:\n", "    print(\"✓ MASt3R already exists\")\n", "\n", "# Clone DUSt3R (needed inside the MASt3R tree)\n", "if not os.path.exists('/content/mast3r/dust3r'):\n", "    print(\"Cloning DUSt3R repository...\")\n", "    !git clone --recursive https://github.com/naver/dust3r.git /content/mast3r/dust3r\n", "    print(\"✓ DUSt3R cloned\")\n", "else:\n", "    print(\"✓ DUSt3R already exists\")\n", "\n", "# Add both to the import path\n", "sys.path.insert(0, '/content/mast3r')\n", "sys.path.insert(0, '/content/mast3r/dust3r')\n", "\n", "# Verify the import works\n", "try:\n", "    from dust3r.model import AsymmetricCroCo3DStereo\n", "    print(\"✓ dust3r.model imported successfully\")\n", "except ImportError as e:\n", "    print(f\"✗ Import error: {e}\")\n", "\n", "# Clone CroCo as well (a MASt3R dependency)\n", "if not os.path.exists('/content/mast3r/croco'):\n", "    print(\"Cloning CroCo repository...\")\n", "    !git clone --recursive https://github.com/naver/croco.git /content/mast3r/croco\n", "    print(\"✓ CroCo cloned\")\n", "\n", "# =====================================================================\n", "# CELL 4: Clone and Build Gaussian Splatting\n", "# =====================================================================\n", "print(\"\\n\" + \"=\"*70)\n", "print(\"STEP: Clone Gaussian Splatting\")\n", "print(\"=\"*70)\n", "WORK_DIR = \"/content/gaussian-splatting\"\n", "\n", "import subprocess\n", "if not os.path.exists(WORK_DIR):\n", "    subprocess.run([\n", "        \"git\", \"clone\", \"--recursive\",\n", "        \"https://github.com/graphdeco-inria/gaussian-splatting.git\",\n", "        WORK_DIR\n", "    ], capture_output=True)\n", "    print(\"✓ Cloned\")\n", "else:\n", "    print(\"✓ Already exists\")\n", "\n", "# Submodules that must be pip-installed (custom CUDA kernels)\n", "submodules = [\n", "    \"/content/gaussian-splatting/submodules/diff-gaussian-rasterization\",\n", "    \"/content/gaussian-splatting/submodules/simple-knn\"\n", "]\n", "\n", "for path in submodules:\n", "    print(f\"Installing {path}...\")\n", "    subprocess.run([\"pip\", \"install\", path], check=True)\n", "\n", "print(\"✓ Custom CUDA modules installed.\")\n", "\n", "print(f\"✓ np: {np.__version__} - {np.__file__}\")\n", "!pip show numpy | grep Version" ], "metadata": { "id": "TPcj5qcmedBw", "colab": { "base_uri": "https://localhost:8080/" }, "outputId": "352b3702-72af-4c2e-df69-50048aa6f5ef" }, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount(\"/content/drive\", force_remount=True).\n", "✓ np: 2.0.2 - /usr/local/lib/python3.12/dist-packages/numpy/__init__.py\n", "Version: 2.0.2\n", "Version 3.1, 31 March 2009\n", " Version 3, 29 June 2007\n", " 5. Conveying Modified Source Versions.\n", " 14. Revised Versions of this License.\n", "✓ roma is installed\n", "✓ MASt3R already exists\n", "✓ DUSt3R already exists\n", "✓ dust3r.model imported successfully\n", "\n", "======================================================================\n", "STEP: Clone Gaussian Splatting\n", "======================================================================\n", "✓ Already exists\n", "Installing /content/gaussian-splatting/submodules/diff-gaussian-rasterization...\n", "Installing /content/gaussian-splatting/submodules/simple-knn...\n", "✓ Custom CUDA modules installed.\n", "✓ np: 2.0.2 - /usr/local/lib/python3.12/dist-packages/numpy/__init__.py\n", "Version: 2.0.2\n", "Version 3.1, 31 March 2009\n", " Version 3, 29 June 2007\n", " 5. Conveying Modified Source Versions.\n", " 14. Revised Versions of this License.\n" ] } ], "execution_count": 73 },
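{ "cell_type": "markdown", "source": [ "A quick, hedged check that the two CUDA submodules built in CELL 4 actually import. The module names are assumptions taken from the submodule directory names (`diff-gaussian-rasterization`, `simple-knn`) and may differ between gaussian-splatting revisions." ], "metadata": {} },
{ "cell_type": "code", "source": [ "# Import check for the freshly built CUDA extensions.\n", "# Module names are assumptions based on the submodule folder names;\n", "# adjust if your gaussian-splatting checkout differs.\n", "for mod_name in (\"diff_gaussian_rasterization\", \"simple_knn._C\"):\n", "    try:\n", "        __import__(mod_name)\n", "        print(f\"✓ {mod_name} imports\")\n", "    except Exception as e:\n", "        print(f\"✗ {mod_name}: {e}\")" ], "metadata": {}, "outputs": [], "execution_count": null },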
{ "cell_type": "code", "source": [ "# =====================================================================\n", "# CELL 5: Import Core Libraries and Configure Memory\n", "# =====================================================================\n", "import os\n", "import sys\n", "import gc\n", "import torch\n", "import numpy as np\n", "from pathlib import Path\n", "from tqdm import tqdm\n", "import torch.nn.functional as F\n", "import shutil\n", "from PIL import Image\n", "from transformers import AutoImageProcessor, AutoModel\n", "\n", "# MEMORY MANAGEMENT\n", "os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'expandable_segments:True'\n", "\n", "def clear_memory():\n", "    \"\"\"Free Python and CUDA memory.\"\"\"\n", "    gc.collect()\n", "    if torch.cuda.is_available():\n", "        torch.cuda.empty_cache()\n", "        torch.cuda.synchronize()\n", "\n", "def get_memory_info():\n", "    \"\"\"Print current GPU/CPU memory usage.\"\"\"\n", "    if torch.cuda.is_available():\n", "        allocated = torch.cuda.memory_allocated() / 1024**3\n", "        reserved = torch.cuda.memory_reserved() / 1024**3\n", "        print(f\"GPU Memory - Allocated: {allocated:.2f}GB, Reserved: {reserved:.2f}GB\")\n", "\n", "    import psutil\n", "    cpu_mem = psutil.virtual_memory().percent\n", "    print(f\"CPU Memory Usage: {cpu_mem:.1f}%\")\n", "\n", "# CONFIGURATION\n", "class Config:\n", "    DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n", "    MAST3R_WEIGHTS = \"naver/MASt3R_ViTLarge_BaseDecoder_512_catmlpdpt_metric\"\n", "    DUST3R_WEIGHTS = \"naver/DUSt3R_ViTLarge_BaseDecoder_512_dpt\"\n", "\n", "    # DINO settings\n", "    DINO_MODEL = \"facebook/dinov2-base\"\n", "    GLOBAL_TOPK = 20     # top-K neighbours each image is paired with\n", "    RETRIEVAL_TOPK = 10  # top-K used when building pairs from the similarity matrix\n", "\n", "    IMAGE_SIZE = 224\n", "\n", "# =====================================================================\n", "# CELL 6: Image Preprocessing Functions (Biplet)\n", "# =====================================================================\n", "def normalize_image_sizes_biplet(input_dir, output_dir=None, size=1024):\n", "    \"\"\"\n", "    Generates two square crops (Left & Right or Top & Bottom)\n", "    from each image in a directory.\n", "    \"\"\"\n", "    if output_dir is None:\n", "        output_dir = input_dir + \"_biplet\"\n", "\n", "    os.makedirs(output_dir, exist_ok=True)\n", "\n", "    print(f\"\\n=== Generating Biplet Crops ({size}x{size}) ===\")\n", "\n", "    converted_count = 0\n", "    size_stats = {}\n", "\n", "    for img_file in tqdm(sorted(os.listdir(input_dir)), desc=\"Creating biplets\"):\n", "        if not img_file.lower().endswith(('.jpg', '.jpeg', '.png')):\n", "            continue\n", "\n", "        input_path = os.path.join(input_dir, img_file)\n", "\n", "        try:\n", "            img = Image.open(input_path)\n", "            original_size = img.size\n", "\n", "            size_key = f\"{original_size[0]}x{original_size[1]}\"\n", "            size_stats[size_key] = size_stats.get(size_key, 0) + 1\n", "\n", "            # Generate 2 crops\n", "            crops = generate_two_crops(img, size)\n", "\n", "            base_name, ext = os.path.splitext(img_file)\n", "            for mode, cropped_img in crops.items():\n", "                output_path = os.path.join(output_dir, f\"{base_name}_{mode}{ext}\")\n", "                cropped_img.save(output_path, quality=95)\n", "\n", "            converted_count += 1\n", "\n", "        except Exception as e:\n", "            print(f\"  ✗ Error processing {img_file}: {e}\")\n", "\n", "    print(f\"\\n✓ Biplet generation complete:\")\n", "    print(f\"  Source images: {converted_count}\")\n", "    print(f\"  Biplet crops generated: {converted_count * 2}\")\n", "    print(f\"  Original size distribution: {size_stats}\")\n", "\n", "    return output_dir\n", "\n", "\n", "def generate_two_crops(img, size):\n", "    \"\"\"\n", "    Crops the image into a square and returns 2 variations\n", "    \"\"\"\n", "    width, height = img.size\n", "    crop_size = min(width, height)\n", "    crops = {}\n", "\n", "    if width > height:\n", "        # Landscape → Left & Right\n", "        positions = {\n", "            'left': 0,\n", "            'right': width - crop_size\n", "        }\n", "        for mode, x_offset in positions.items():\n", "            box = (x_offset, 0, x_offset + crop_size, crop_size)\n", "            crops[mode] = img.crop(box).resize(\n", "                (size, size),\n", "                Image.Resampling.LANCZOS\n", "            )\n", "    else:\n", "        # Portrait or Square → Top & Bottom\n", "        positions = {\n", "            'top': 0,\n", "            'bottom': height - crop_size\n", "        }\n", "        for mode, y_offset in positions.items():\n", "            box = (0, y_offset, crop_size, y_offset + crop_size)\n", "            crops[mode] = img.crop(box).resize(\n", "                (size, size),\n", "                Image.Resampling.LANCZOS\n", "            )\n", "\n", "    return crops" ], "metadata": { "id": "gGSu2_vf8Az-" }, "execution_count": 74, "outputs": [] },
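{ "cell_type": "markdown", "source": [ "Usage sketch for the biplet preprocessing above. `INPUT_DIR` is a hypothetical placeholder path, not part of the original notebook; point it at a real image folder before running." ], "metadata": {} },
{ "cell_type": "code", "source": [ "# Usage sketch for normalize_image_sizes_biplet (INPUT_DIR is hypothetical).\n", "INPUT_DIR = '/content/drive/MyDrive/scene_images'  # placeholder path\n", "if os.path.isdir(INPUT_DIR):\n", "    biplet_dir = normalize_image_sizes_biplet(INPUT_DIR, size=1024)\n", "    print(\"Biplet crops written to:\", biplet_dir)\n", "else:\n", "    print(\"Set INPUT_DIR to a real folder before running this cell.\")" ], "metadata": {}, "outputs": [], "execution_count": null },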
{ "cell_type": "code", "source": [ "# =====================================================================\n", "# CELL 7: Image Loading Function\n", "# =====================================================================\n", "def load_images_from_directory(image_dir, max_images=200):\n", "    \"\"\"Load image paths from a directory.\"\"\"\n", "    print(f\"\\nLoading images from: {image_dir}\")\n", "\n", "    valid_extensions = {'.jpg', '.jpeg', '.png', '.bmp'}\n", "    image_paths = []\n", "\n", "    for ext in valid_extensions:\n", "        image_paths.extend(sorted(Path(image_dir).glob(f'*{ext}')))\n", "        image_paths.extend(sorted(Path(image_dir).glob(f'*{ext.upper()}')))\n", "\n", "    image_paths = sorted(set(str(p) for p in image_paths))\n", "\n", "    if len(image_paths) > max_images:\n", "        print(f\"⚠️ Limiting from {len(image_paths)} to {max_images} images\")\n", "        image_paths = image_paths[:max_images]\n", "\n", "    print(f\"✓ Found {len(image_paths)} images\")\n", "    return image_paths\n", "\n", "# =====================================================================\n", "# CELL 8: MASt3R Model Loading\n", "# =====================================================================\n", "def load_mast3r_model(device):\n", "    \"\"\"Load the MASt3R model, falling back to DUSt3R weights.\"\"\"\n", "    print(\"\\n=== Loading MASt3R Model ===\")\n", "\n", "    if '/content/mast3r' not in sys.path:\n", "        sys.path.insert(0, '/content/mast3r')\n", "    if '/content/mast3r/dust3r' not in sys.path:\n", "        sys.path.insert(0, '/content/mast3r/dust3r')\n", "\n", "    from dust3r.model import AsymmetricCroCo3DStereo\n", "\n", "    try:\n", "        print(f\"Attempting to load: {Config.MAST3R_WEIGHTS}\")\n", "        model = AsymmetricCroCo3DStereo.from_pretrained(Config.MAST3R_WEIGHTS).to(device)\n", "        print(\"✓ Loaded MASt3R model\")\n", "    except Exception as e:\n", "        print(f\"⚠️ Failed to load MASt3R: {e}\")\n", "        print(f\"Trying DUSt3R instead: {Config.DUST3R_WEIGHTS}\")\n", "        model = AsymmetricCroCo3DStereo.from_pretrained(Config.DUST3R_WEIGHTS).to(device)\n", "        print(\"✓ Loaded DUSt3R model as fallback\")\n", "\n", "    model.eval()\n", "    print(f\"✓ Model loaded on {device}\")\n", "    return model" ], "metadata": { "trusted": true, "id": "OWJEB1oQTKyD" }, "outputs": [], "execution_count": 75 },
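{ "cell_type": "markdown", "source": [ "A small usage sketch for the loader, reusing the placeholder `INPUT_DIR` from the earlier example; it simply lists what the pipeline would consume." ], "metadata": {} },
{ "cell_type": "code", "source": [ "# Hedged usage sketch (INPUT_DIR is the placeholder defined above).\n", "if os.path.isdir(INPUT_DIR):\n", "    paths = load_images_from_directory(INPUT_DIR, max_images=50)\n", "    print(paths[:3])" ], "metadata": {}, "outputs": [], "execution_count": null },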
{ "cell_type": "code", "source": [ "# =====================================================================\n", "# CELL 12: Feature Extraction (FIXED)\n", "# =====================================================================\n", "def extract_mast3r_features(model, image_paths, device, batch_size=1):\n", "    \"\"\"Extract per-image features with the MASt3R model (fixed version).\"\"\"\n", "    print(\"\\n=== Extracting MASt3R Features ===\")\n", "    from dust3r.utils.image import load_images\n", "    from dust3r.inference import inference\n", "\n", "    all_features = []\n", "\n", "    for i in tqdm(range(len(image_paths)), desc=\"Features\"):\n", "        img_path = image_paths[i]\n", "\n", "        # Load the same image twice (as a self-pair)\n", "        images = load_images([img_path, img_path], size=Config.IMAGE_SIZE)\n", "        pairs = [(images[0], images[1])]\n", "\n", "        with torch.no_grad():\n", "            output = inference(pairs, model, device, batch_size=1)\n", "\n", "        try:\n", "            # Pull a descriptor tensor out of the inference output\n", "            if isinstance(output, dict):\n", "                if 'pred1' in output:\n", "                    pred1 = output['pred1']\n", "                    if isinstance(pred1, dict):\n", "                        # Prefer 'desc', then 'conf', then 'pts3d'\n", "                        if 'desc' in pred1:\n", "                            desc = pred1['desc']\n", "                        elif 'conf' in pred1:\n", "                            desc = pred1['conf']\n", "                        elif 'pts3d' in pred1:\n", "                            desc = pred1['pts3d']\n", "                        else:\n", "                            desc = list(pred1.values())[0]\n", "                    else:\n", "                        desc = pred1\n", "                elif 'view1' in output:\n", "                    view1 = output['view1']\n", "                    if isinstance(view1, dict):\n", "                        desc = view1.get('desc', view1.get('conf', view1.get('pts3d', list(view1.values())[0])))\n", "                    else:\n", "                        desc = view1\n", "                else:\n", "                    desc = list(output.values())[0]\n", "            elif isinstance(output, tuple) and len(output) == 2:\n", "                view1, view2 = output\n", "                if isinstance(view1, dict):\n", "                    desc = view1.get('desc', view1.get('conf', view1.get('pts3d', list(view1.values())[0])))\n", "                else:\n", "                    desc = view1\n", "            elif isinstance(output, list):\n", "                item = output[0]\n", "                if isinstance(item, dict):\n", "                    desc = item.get('desc', item.get('conf', item.get('pts3d', list(item.values())[0])))\n", "                else:\n", "                    desc = item\n", "            else:\n", "                desc = output\n", "\n", "            # Move the tensor to CPU before storing\n", "            if isinstance(desc, torch.Tensor):\n", "                desc = desc.detach().cpu()\n", "\n", "            # Drop the batch dimension if the tensor is 4-D\n", "            if desc.dim() == 4:\n", "                desc = desc.squeeze(0)\n", "\n", "            # If the channel dimension is tiny (e.g. raw RGB), tile the\n", "            # channels up: [H, W, 3] -> [H, W, ~64] by repetition\n", "            if desc.shape[-1] < 16:\n", "                desc = desc.unsqueeze(-1).repeat(1, 1, 1, 64 // desc.shape[-1]).reshape(desc.shape[0], desc.shape[1], -1)\n", "\n", "            all_features.append(desc)\n", "\n", "        except Exception as e:\n", "            print(f\"⚠️ Error extracting features for image {i}: {e}\")\n", "            # Fallback: zero features\n", "            all_features.append(torch.zeros((Config.IMAGE_SIZE, Config.IMAGE_SIZE, 64)))\n", "\n", "        # Free memory\n", "        del output, images, pairs\n", "        if i % 10 == 0:\n", "            torch.cuda.empty_cache()\n", "\n", "    print(f\"✓ Extracted features for {len(all_features)} images\")\n", "    if all_features:\n", "        first_feat = all_features[0]\n", "        if isinstance(first_feat, torch.Tensor):\n", "            print(f\"  Feature shape: {first_feat.shape}\")\n", "\n", "    return all_features" ], "metadata": { "id": "CbO4hwKQqDUb" }, "execution_count": 76, "outputs": [] },
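{ "cell_type": "markdown", "source": [ "A hedged smoke test for the extractor: it runs a handful of images through `extract_mast3r_features` and prints the resulting shapes. It assumes `paths` from the loader sketch above and downloads the model weights on first run." ], "metadata": {} },
{ "cell_type": "code", "source": [ "# Smoke test for the feature extractor (assumes `paths` from the sketch above).\n", "if os.path.isdir(INPUT_DIR):\n", "    model = load_mast3r_model(Config.DEVICE)\n", "    feats = extract_mast3r_features(model, paths[:4], Config.DEVICE)\n", "    for k, f in enumerate(feats):\n", "        print(k, tuple(f.shape) if isinstance(f, torch.Tensor) else type(f))\n", "    del model\n", "    clear_memory()" ], "metadata": {}, "outputs": [], "execution_count": null },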
{ "cell_type": "code", "source": [ "# =====================================================================\n", "# CELL 13: ASMK Similarity Computation (FIXED)\n", "# =====================================================================\n", "def compute_asmk_similarity(features, codebook=None):\n", "    \"\"\"Compute a similarity matrix from global descriptors\n", "    (cosine similarity of mean-pooled features, as an ASMK stand-in).\"\"\"\n", "    print(\"\\n=== Computing ASMK Similarity ===\")\n", "\n", "    n_images = len(features)\n", "    similarity_matrix = np.zeros((n_images, n_images), dtype=np.float32)\n", "\n", "    # Collapse each feature map into a single global descriptor\n", "    global_features = []\n", "\n", "    for feat in features:\n", "        if isinstance(feat, dict):\n", "            for key in ['desc', 'conf', 'pts3d']:\n", "                if key in feat:\n", "                    feat = feat[key]\n", "                    break\n", "\n", "        if isinstance(feat, torch.Tensor):\n", "            feat = feat.cpu().numpy()\n", "\n", "        if isinstance(feat, np.ndarray):\n", "            if feat.ndim == 3:  # [H, W, C]\n", "                feat_flat = feat.reshape(-1, feat.shape[-1])\n", "            elif feat.ndim == 2:  # [N, C]\n", "                feat_flat = feat\n", "            else:\n", "                feat_flat = feat.reshape(-1, max(feat.shape))\n", "\n", "            global_desc = np.mean(feat_flat, axis=0)\n", "            global_features.append(global_desc)\n", "        else:\n", "            # Dummy descriptor\n", "            global_features.append(np.zeros(64))\n", "\n", "    global_features = np.stack(global_features)\n", "    feature_dim = global_features.shape[1]\n", "\n", "    print(f\"Global features shape: {global_features.shape}\")\n", "\n", "    # Cosine similarity\n", "    global_features_norm = global_features / (np.linalg.norm(global_features, axis=1, keepdims=True) + 1e-8)\n", "    similarity_matrix = global_features_norm @ global_features_norm.T\n", "\n", "    np.fill_diagonal(similarity_matrix, -1)\n", "\n", "    print(f\"Similarity matrix shape: {similarity_matrix.shape}\")\n", "    print(f\"Similarity range: [{similarity_matrix.min():.3f}, {similarity_matrix.max():.3f}]\")\n", "\n", "    return similarity_matrix\n", "\n", "\n", "def build_pairs_from_similarity(similarity_matrix, top_k=10):\n", "    \"\"\"Build image pairs from a similarity matrix.\"\"\"\n", "    n_images = similarity_matrix.shape[0]\n", "    pairs = []\n", "\n", "    for i in range(n_images):\n", "        similarities = similarity_matrix[i]\n", "        top_indices = np.argsort(similarities)[::-1][:top_k]\n", "\n", "        for j in top_indices:\n", "            if j > i:\n", "                pairs.append((i, j))\n", "\n", "    pairs = list(set(pairs))\n", "    print(f\"✓ Built {len(pairs)} unique pairs\")\n", "\n", "    return pairs\n", "\n", "\n", "def get_image_pairs_asmk(image_paths, max_pairs=100):\n", "    \"\"\"Get image pairs via the retrieval above.\"\"\"\n", "    print(\"\\n=== Getting Image Pairs with ASMK ===\")\n", "\n", "    device = Config.DEVICE\n", "    model = load_mast3r_model(device)\n", "    features = extract_mast3r_features(model, image_paths, device)\n", "    similarity_matrix = compute_asmk_similarity(features)\n", "    pairs = build_pairs_from_similarity(similarity_matrix, Config.RETRIEVAL_TOPK)\n", "\n", "    # Release the model\n", "    del model\n", "    clear_memory()\n", "\n", "    if len(pairs) > max_pairs:\n", "        pairs = pairs[:max_pairs]\n", "        print(f\"Limited to {max_pairs} pairs\")\n", "\n", "    return pairs" ], "metadata": { "trusted": true, "id": "ktrOug8RlbLq" }, "outputs": [], "execution_count": 77 },
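{ "cell_type": "markdown", "source": [ "A self-contained toy check of the pairing logic: random unit-norm descriptors stand in for real MASt3R features, so this runs without any model download." ], "metadata": {} },
{ "cell_type": "code", "source": [ "# Toy check of build_pairs_from_similarity on random unit descriptors.\n", "rng = np.random.default_rng(0)\n", "toy = rng.normal(size=(6, 64)).astype(np.float32)\n", "toy /= np.linalg.norm(toy, axis=1, keepdims=True)\n", "sim = toy @ toy.T\n", "np.fill_diagonal(sim, -1)\n", "print(build_pairs_from_similarity(sim, top_k=3))" ], "metadata": {}, "outputs": [], "execution_count": null },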
features:\n", " if isinstance(feat, dict):\n", " for key in ['desc', 'conf', 'pts3d']:\n", " if key in feat:\n", " feat = feat[key]\n", " break\n", "\n", " if isinstance(feat, torch.Tensor):\n", " feat = feat.cpu().numpy()\n", "\n", " if isinstance(feat, np.ndarray):\n", " if feat.ndim == 3: # [H, W, C]\n", " feat_flat = feat.reshape(-1, feat.shape[-1])\n", " elif feat.ndim == 2: # [N, C]\n", " feat_flat = feat\n", " else:\n", " feat_flat = feat.reshape(-1, max(feat.shape))\n", "\n", " global_desc = np.mean(feat_flat, axis=0)\n", " global_features.append(global_desc)\n", " else:\n", " # ダミー特徴量\n", " global_features.append(np.zeros(64))\n", "\n", " global_features = np.stack(global_features)\n", " feature_dim = global_features.shape[1]\n", "\n", " print(f\"Global features shape: {global_features.shape}\")\n", "\n", " # コサイン類似度を使用\n", " global_features_norm = global_features / (np.linalg.norm(global_features, axis=1, keepdims=True) + 1e-8)\n", " similarity_matrix = global_features_norm @ global_features_norm.T\n", "\n", " np.fill_diagonal(similarity_matrix, -1)\n", "\n", " print(f\"Similarity matrix shape: {similarity_matrix.shape}\")\n", " print(f\"Similarity range: [{similarity_matrix.min():.3f}, {similarity_matrix.max():.3f}]\")\n", "\n", " return similarity_matrix\n", "\n", "\n", "def build_pairs_from_similarity(similarity_matrix, top_k=10):\n", " \"\"\"類似度行列からペアを構築\"\"\"\n", " n_images = similarity_matrix.shape[0]\n", " pairs = []\n", "\n", " for i in range(n_images):\n", " similarities = similarity_matrix[i]\n", " top_indices = np.argsort(similarities)[::-1][:top_k]\n", "\n", " for j in top_indices:\n", " if j > i:\n", " pairs.append((i, j))\n", "\n", " pairs = list(set(pairs))\n", " print(f\"✓ Built {len(pairs)} unique pairs\")\n", "\n", " return pairs\n", "\n", "\n", "def get_image_pairs_asmk(image_paths, max_pairs=100):\n", " \"\"\"ASMKを使用して画像ペアを取得\"\"\"\n", " print(\"\\n=== Getting Image Pairs with ASMK ===\")\n", "\n", " device = Config.DEVICE\n", " model = load_mast3r_model(device)\n", " features = extract_mast3r_features(model, image_paths, device)\n", " similarity_matrix = compute_asmk_similarity(features)\n", " pairs = build_pairs_from_similarity(similarity_matrix, Config.RETRIEVAL_TOPK)\n", "\n", " # モデルを解放\n", " del model\n", " clear_memory()\n", "\n", " if len(pairs) > max_pairs:\n", " pairs = pairs[:max_pairs]\n", " print(f\"Limited to {max_pairs} pairs\")\n", "\n", " return pairs" ], "metadata": { "trusted": true, "id": "ktrOug8RlbLq" }, "outputs": [], "execution_count": 77 }, { "cell_type": "markdown", "source": [ "---" ], "metadata": { "id": "89w-uyeylbLq" } }, { "cell_type": "code", "source": [ "# =====================================================================\n", "# CELL 9: DINO Pair Selection (REPLACES ASMK)\n", "# =====================================================================\n", "def load_torch_image(fname, device):\n", " \"\"\"Load image as torch tensor\"\"\"\n", " import torchvision.transforms as T\n", "\n", " img = Image.open(fname).convert('RGB')\n", " transform = T.Compose([\n", " T.ToTensor(),\n", " ])\n", " return transform(img).unsqueeze(0).to(device)\n", "\n", "def extract_dino_global(image_paths, model_path, device):\n", " \"\"\"Extract DINO global descriptors with memory management\"\"\"\n", " print(\"\\n=== Extracting DINO Global Features ===\")\n", " print(\"Initial memory state:\")\n", " get_memory_info()\n", "\n", " processor = AutoImageProcessor.from_pretrained(model_path)\n", " model = 
{ "cell_type": "code", "source": [
"# =====================================================================\n",
"# CELL 9: DINO Pair Selection (REPLACES ASMK)\n",
"# =====================================================================\n",
"def load_torch_image(fname, device):\n",
"    \"\"\"Load image as torch tensor\"\"\"\n",
"    import torchvision.transforms as T\n",
"\n",
"    img = Image.open(fname).convert('RGB')\n",
"    transform = T.Compose([\n",
"        T.ToTensor(),\n",
"    ])\n",
"    return transform(img).unsqueeze(0).to(device)\n",
"\n",
"def extract_dino_global(image_paths, model_path, device):\n",
"    \"\"\"Extract DINO global descriptors with memory management\"\"\"\n",
"    print(\"\\n=== Extracting DINO Global Features ===\")\n",
"    print(\"Initial memory state:\")\n",
"    get_memory_info()\n",
"\n",
"    processor = AutoImageProcessor.from_pretrained(model_path)\n",
"    model = AutoModel.from_pretrained(model_path).eval().to(device)\n",
"\n",
"    global_descs = []\n",
"    batch_size = 4  # Small batch to save memory\n",
"\n",
"    for i in tqdm(range(0, len(image_paths), batch_size), desc=\"DINO extraction\"):\n",
"        batch_paths = image_paths[i:i+batch_size]\n",
"        batch_imgs = []\n",
"\n",
"        for img_path in batch_paths:\n",
"            img = load_torch_image(img_path, device)\n",
"            batch_imgs.append(img)\n",
"\n",
"        batch_tensor = torch.cat(batch_imgs, dim=0)\n",
"\n",
"        with torch.no_grad():\n",
"            inputs = processor(images=batch_tensor, return_tensors=\"pt\", do_rescale=False).to(device)\n",
"            outputs = model(**inputs)\n",
"            # Max-pool the patch tokens (index 1 onward skips the CLS token)\n",
"            desc = F.normalize(outputs.last_hidden_state[:, 1:].max(dim=1)[0], dim=1, p=2)\n",
"            global_descs.append(desc.cpu())\n",
"\n",
"        # Clear batch memory\n",
"        del batch_tensor, inputs, outputs, desc\n",
"        clear_memory()\n",
"\n",
"    global_descs = torch.cat(global_descs, dim=0)\n",
"\n",
"    del model, processor\n",
"    clear_memory()\n",
"\n",
"    print(\"After DINO extraction:\")\n",
"    get_memory_info()\n",
"\n",
"    return global_descs\n",
"\n",
"def build_topk_pairs(global_feats, k, device):\n",
"    \"\"\"Build top-k similar pairs from global features\"\"\"\n",
"    g = global_feats.to(device)\n",
"    sim = g @ g.T\n",
"    sim.fill_diagonal_(-1)\n",
"\n",
"    N = sim.size(0)\n",
"    k = min(k, N - 1)\n",
"\n",
"    topk_indices = torch.topk(sim, k, dim=1).indices.cpu()\n",
"\n",
"    pairs = []\n",
"    for i in range(N):\n",
"        for j in topk_indices[i]:\n",
"            j = j.item()\n",
"            if i < j:\n",
"                pairs.append((i, j))\n",
"\n",
"    # Remove duplicates\n",
"    pairs = list(set(pairs))\n",
"\n",
"    return pairs\n",
"\n",
"def select_diverse_pairs(pairs, max_pairs, num_images):\n",
"    \"\"\"\n",
"    Select diverse pairs to ensure good image coverage\n",
"    \"\"\"\n",
"    import random\n",
"    random.seed(42)\n",
"\n",
"    if len(pairs) <= max_pairs:\n",
"        return pairs\n",
"\n",
"    print(f\"Selecting {max_pairs} diverse pairs from {len(pairs)} candidates...\")\n",
"\n",
"    # Count how many times each image appears in pairs\n",
"    image_counts = {i: 0 for i in range(num_images)}\n",
"    for i, j in pairs:\n",
"        image_counts[i] += 1\n",
"        image_counts[j] += 1\n",
"\n",
"    # Sort pairs so that pairs touching less-connected images come first\n",
"    def pair_score(pair):\n",
"        i, j = pair\n",
"        return image_counts[i] + image_counts[j]\n",
"\n",
"    pairs_scored = [(pair, pair_score(pair)) for pair in pairs]\n",
"    pairs_scored.sort(key=lambda x: x[1])\n",
"\n",
"    # Select pairs greedily to maximize coverage\n",
"    selected = []\n",
"    selected_images = set()\n",
"\n",
"    # Phase 1: Select pairs that add new images\n",
"    for pair, score in pairs_scored:\n",
"        if len(selected) >= max_pairs:\n",
"            break\n",
"        i, j = pair\n",
"        if i not in selected_images or j not in selected_images:\n",
"            selected.append(pair)\n",
"            selected_images.add(i)\n",
"            selected_images.add(j)\n",
"\n",
"    # Phase 2: Fill remaining slots\n",
"    if len(selected) < max_pairs:\n",
"        selected_set = set(selected)\n",
"        remaining = [p for p, s in pairs_scored if p not in selected_set]\n",
"        random.shuffle(remaining)\n",
"        selected.extend(remaining[:max_pairs - len(selected)])\n",
"\n",
"    print(f\"Selected pairs cover {len(selected_images)} / {num_images} images ({100*len(selected_images)/num_images:.1f}%)\")\n",
"\n",
"    return selected\n",
"\n"
], "metadata": { "trusted": true, "id": "rOWldYaWlbLr" }, "outputs": [], "execution_count": 78 },
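{ "cell_type": "code", "source": [
"# Hedged sketch (editor's addition): select_diverse_pairs on toy pairs —\n",
"# 6 images, capped at 3 pairs; the greedy phase keeps full image coverage.\n",
"toy_pairs = [(0, 1), (0, 2), (0, 3), (4, 5), (1, 2)]\n",
"print(select_diverse_pairs(toy_pairs, max_pairs=3, num_images=6))\n",
"# -> 3 pairs covering all 6 images, e.g. [(4, 5), (0, 3), (1, 2)]\n"
], "metadata": { "id": "hedged03" }, "outputs": [], "execution_count": null },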
{ "cell_type": "code", "source": [
"# =====================================================================\n",
"# Drop images that appear in no pair (add after CELL 9)\n",
"# =====================================================================\n",
"\n",
"def filter_unpaired_images(image_paths, pairs):\n",
"    \"\"\"\n",
"    Keep only images that appear in at least one pair and remap indices.\n",
"\n",
"    Args:\n",
"        image_paths: list of image paths\n",
"        pairs: list of pairs [(i, j), ...] using the original indices\n",
"\n",
"    Returns:\n",
"        filtered_image_paths: the filtered list of image paths\n",
"        remapped_pairs: the pairs re-indexed against the filtered list\n",
"    \"\"\"\n",
"    # Collect the image indices that occur in any pair\n",
"    used_indices = set()\n",
"    for i, j in pairs:\n",
"        used_indices.add(i)\n",
"        used_indices.add(j)\n",
"\n",
"    used_indices = sorted(used_indices)\n",
"\n",
"    # Report the images that get dropped\n",
"    all_indices = set(range(len(image_paths)))\n",
"    removed_indices = all_indices - set(used_indices)\n",
"\n",
"    if removed_indices:\n",
"        print(f\"\\n⚠️ Removing {len(removed_indices)} unpaired images:\")\n",
"        for idx in sorted(removed_indices):\n",
"            print(f\"  - {idx}: {os.path.basename(image_paths[idx])}\")\n",
"\n",
"    # Build the new image list\n",
"    filtered_image_paths = [image_paths[i] for i in used_indices]\n",
"\n",
"    # Map old index -> new index\n",
"    old_to_new = {old_idx: new_idx for new_idx, old_idx in enumerate(used_indices)}\n",
"\n",
"    # Remap the pair indices\n",
"    remapped_pairs = []\n",
"    for i, j in pairs:\n",
"        new_i = old_to_new[i]\n",
"        new_j = old_to_new[j]\n",
"        remapped_pairs.append((new_i, new_j))\n",
"\n",
"    print(f\"\\n✓ Filtered results:\")\n",
"    print(f\"  Original images: {len(image_paths)}\")\n",
"    print(f\"  Kept images: {len(filtered_image_paths)}\")\n",
"    print(f\"  Pairs: {len(remapped_pairs)}\")\n",
"\n",
"    # Sanity check: every kept image must be covered by some pair\n",
"    covered = set()\n",
"    for i, j in remapped_pairs:\n",
"        covered.add(i)\n",
"        covered.add(j)\n",
"\n",
"    assert len(covered) == len(filtered_image_paths), \\\n",
"        f\"Remapping error: covered {len(covered)} but have {len(filtered_image_paths)} images\"\n",
"\n",
"    return filtered_image_paths, remapped_pairs\n",
"\n",
"\n",
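"# Hedged in-cell sketch (editor's addition): filter_unpaired_images on toy\n",
"# data; the paths are hypothetical and never opened (only basenames print).\n",
"toy_paths = [f\"/tmp/img_{k}.jpg\" for k in range(4)]\n",
"print(filter_unpaired_images(toy_paths, [(0, 2), (2, 3)]))\n",
"# -> keeps images 0, 2, 3 and remaps the pairs to [(0, 1), (1, 2)]\n",
"\n",
"\n",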
"# =====================================================================\n",
"# Fixed get_image_pairs_dino from CELL 9 (adds unpaired-image filtering)\n",
"# =====================================================================\n",
"def get_image_pairs_dino(image_paths, max_pairs=None):\n",
"    \"\"\"DINO-based pair selection with unpaired image filtering\"\"\"\n",
"    device = Config.DEVICE\n",
"\n",
"    # DINO global features\n",
"    global_feats = extract_dino_global(image_paths, Config.DINO_MODEL, device)\n",
"    pairs = build_topk_pairs(global_feats, Config.GLOBAL_TOPK, device)\n",
"\n",
"    print(f\"Initial pairs from DINO: {len(pairs)}\")\n",
"\n",
"    # Apply intelligent pair selection if a limit is specified\n",
"    if max_pairs and len(pairs) > max_pairs:\n",
"        pairs = select_diverse_pairs(pairs, max_pairs, len(image_paths))\n",
"\n",
"    # 🔧 Drop images that ended up in no pair\n",
"    filtered_image_paths, remapped_pairs = filter_unpaired_images(image_paths, pairs)\n",
"\n",
"    return filtered_image_paths, remapped_pairs\n",
"\n",
"\n",
"# =====================================================================\n",
"# Same fix applied to get_image_pairs_asmk from CELL 13\n",
"# =====================================================================\n",
"def get_image_pairs_asmk(image_paths, max_pairs=100):\n",
"    \"\"\"Get image pairs via the (simplified) ASMK retrieval, with filtering\"\"\"\n",
"    print(\"\\n=== Getting Image Pairs with ASMK ===\")\n",
"\n",
"    device = Config.DEVICE\n",
"    model = load_mast3r_model(device)\n",
"    features = extract_mast3r_features(model, image_paths, device)\n",
"    similarity_matrix = compute_asmk_similarity(features)\n",
"    pairs = build_pairs_from_similarity(similarity_matrix, Config.RETRIEVAL_TOPK)\n",
"\n",
"    # Release the model\n",
"    del model\n",
"    clear_memory()\n",
"\n",
"    if len(pairs) > max_pairs:\n",
"        pairs = pairs[:max_pairs]\n",
"        print(f\"Limited to {max_pairs} pairs\")\n",
"\n",
"    # 🔧 Drop images that ended up in no pair\n",
"    filtered_image_paths, remapped_pairs = filter_unpaired_images(image_paths, pairs)\n",
"\n",
"    return filtered_image_paths, remapped_pairs\n",
"\n",
"\n",
"# =====================================================================\n",
"# The main-pipeline call sites need the same change\n",
"# =====================================================================\n",
"# Usage:\n",
"# image_paths = load_images_from_directory(IMAGE_DIR, max_images=50)\n",
"#\n",
"# # Pair selection with DINO (filtering included)\n",
"# filtered_paths, pairs = get_image_pairs_dino(image_paths, max_pairs=100)\n",
"#\n",
"# # Use filtered_paths when running MASt3R\n",
"# model = load_mast3r_model(Config.DEVICE)\n",
"# scene, images = run_mast3r_pairs(model, filtered_paths, pairs, Config.DEVICE)"
], "metadata": { "id": "MM_X5-VWzpoJ" }, "execution_count": 79, "outputs": [] }, { "cell_type": "code", "source": [
"# =====================================================================\n",
"# CELL 10: MASt3R Reconstruction\n",
"# =====================================================================\n",
"def run_mast3r_pairs(model, image_paths, pairs, device, batch_size=1, max_pairs=None):\n",
"    \"\"\"Run MASt3R on selected pairs with memory management\"\"\"\n",
"    print(\"\\n=== Running MASt3R Reconstruction ===\")\n",
"    print(\"Initial memory state:\")\n",
"    get_memory_info()\n",
"\n",
"    from dust3r.inference import inference\n",
"    from dust3r.cloud_opt import global_aligner, GlobalAlignerMode\n",
"    from dust3r.utils.image import load_images\n",
"\n",
"    # Limit number of pairs if specified\n",
"    if max_pairs and len(pairs) > max_pairs:\n",
"        print(f\"Limiting pairs from {len(pairs)} to {max_pairs}\")\n",
"        step = max(1, len(pairs) // max_pairs)\n",
"        pairs = pairs[::step][:max_pairs]\n",
"\n",
"    print(f\"Processing {len(pairs)} pairs...\")\n",
"\n",
"    # Load images at the reduced working size\n",
"    print(f\"Loading {len(image_paths)} images at {Config.IMAGE_SIZE}x{Config.IMAGE_SIZE}...\")\n",
"    images = load_images(image_paths, size=Config.IMAGE_SIZE)\n",
"\n",
"    print(f\"Loaded {len(images)} images\")\n",
"    print(\"After loading images:\")\n",
"    get_memory_info()\n",
"\n",
"    # Create all image pairs\n",
"    print(f\"Creating {len(pairs)} image pairs...\")\n",
"    mast3r_pairs = []\n",
"    for idx1, idx2 in tqdm(pairs, desc=\"Preparing pairs\"):\n",
"        mast3r_pairs.append((images[idx1], images[idx2]))\n",
"\n",
"    print(f\"Running MASt3R inference on {len(mast3r_pairs)} pairs...\")\n",
"\n",
"    # Run inference\n",
"    output = inference(mast3r_pairs, model, device, batch_size=batch_size, verbose=True)\n",
"\n",
"    del mast3r_pairs\n",
"    clear_memory()\n",
"\n",
"    print(\"✓ MASt3R inference complete\")\n",
"    print(\"After inference:\")\n",
"    get_memory_info()\n",
"\n",
"    # Global alignment\n",
"    print(\"Running global alignment...\")\n",
"    scene = global_aligner(\n",
"        output,\n",
"        device=device,\n",
"        mode=GlobalAlignerMode.PointCloudOptimizer\n",
"    )\n",
"\n",
"    del output\n",
"    clear_memory()\n",
"\n",
"    print(\"Computing global alignment...\")\n",
"    loss = scene.compute_global_alignment(\n",
"        init=\"mst\",\n",
"        niter=50,  # Reduced iterations\n",
"        schedule='cosine',\n",
"        lr=0.01\n",
"    )\n",
"\n",
"    print(f\"✓ Global alignment complete (final loss: {loss:.6f})\")\n",
"    print(\"Final memory state:\")\n",
"    get_memory_info()\n",
"\n",
"    return scene, images"
], "metadata": { "trusted": true, "id": "v5QGLqLOlbLs" }, "outputs": [], "execution_count": 80 },
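{ "cell_type": "code", "source": [
"# Hedged sketch (editor's addition): the camera-to-world -> world-to-camera\n",
"# conversion used in CELL 11 below, on a toy pose with identity rotation.\n",
"import numpy as np\n",
"\n",
"c2w = np.eye(4)\n",
"c2w[:3, 3] = [1.0, 2.0, 3.0]   # camera sits at (1, 2, 3) in world space\n",
"w2c = np.linalg.inv(c2w)\n",
"print(w2c[:3, 3])              # [-1. -2. -3.] — the COLMAP-style tvec\n"
], "metadata": { "id": "hedged04" }, "outputs": [], "execution_count": null },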
{ "cell_type": "code", "source": [
"# =====================================================================\n",
"# CELL 11: Camera Parameter Extraction (fixed)\n",
"# =====================================================================\n",
"def extract_camera_params_process2(scene, image_paths, conf_threshold=1.5):\n",
"    \"\"\"Extract camera parameters and 3D points from the scene (with intrinsics rescaling)\"\"\"\n",
"    print(\"\\n=== Extracting Camera Parameters ===\")\n",
"\n",
"    cameras_dict = {}\n",
"    all_pts3d = []\n",
"    all_confidence = []\n",
"\n",
"    try:\n",
"        if hasattr(scene, 'get_im_poses'):\n",
"            poses = scene.get_im_poses()\n",
"        elif hasattr(scene, 'im_poses'):\n",
"            poses = scene.im_poses\n",
"        else:\n",
"            poses = None\n",
"\n",
"        if hasattr(scene, 'get_focals'):\n",
"            focals = scene.get_focals()\n",
"        elif hasattr(scene, 'im_focals'):\n",
"            focals = scene.im_focals\n",
"        else:\n",
"            focals = None\n",
"\n",
"        if hasattr(scene, 'get_principal_points'):\n",
"            pps = scene.get_principal_points()\n",
"        elif hasattr(scene, 'im_pp'):\n",
"            pps = scene.im_pp\n",
"        else:\n",
"            pps = None\n",
"    except Exception as e:\n",
"        print(f\"⚠️ Error getting camera parameters: {e}\")\n",
"        poses = None\n",
"        focals = None\n",
"        pps = None\n",
"\n",
"    # [IMPORTANT] the resolution MASt3R ran at; intrinsics must be rescaled from it\n",
"    mast3r_size = 224.0\n",
"\n",
"    n_images = min(len(poses) if poses is not None else len(image_paths), len(image_paths))\n",
"\n",
"    for idx in range(n_images):\n",
"        img_name = os.path.basename(image_paths[idx])\n",
"\n",
"        try:\n",
"            # Original image size\n",
"            img = Image.open(image_paths[idx])\n",
"            W, H = img.size\n",
"            img.close()\n",
"\n",
"            # Width-based scale factor (the biplet crops are square, so W == H)\n",
"            scale = W / mast3r_size\n",
"\n",
"            # Pose (convert camera-to-world to world-to-camera)\n",
"            if poses is not None and idx < len(poses):\n",
"                pose_c2w = poses[idx]\n",
"                if isinstance(pose_c2w, torch.Tensor):\n",
"                    pose_c2w = pose_c2w.detach().cpu().numpy()\n",
"                if not isinstance(pose_c2w, np.ndarray) or pose_c2w.shape != (4, 4):\n",
"                    pose_c2w = np.eye(4)\n",
"\n",
"                # Convert to world-to-camera\n",
"                pose = np.linalg.inv(pose_c2w)\n",
"            else:\n",
"                pose = np.eye(4)\n",
"\n",
"            # Focal length, rescaled to the original resolution\n",
"            if focals is not None and idx < len(focals):\n",
"                focal_mast3r = focals[idx]\n",
"                if isinstance(focal_mast3r, torch.Tensor):\n",
"                    focal_mast3r = focal_mast3r.detach().cpu().item()\n",
"                else:\n",
"                    focal_mast3r = float(focal_mast3r)\n",
"\n",
"                # 🔧 Apply the scale factor\n",
"                if focals.ndim == 1 or focals.shape[1] == 1:\n",
"                    # Isotropic camera (fx = fy)\n",
"                    focal = focal_mast3r * scale\n",
"                else:\n",
"                    # Anisotropic camera\n",
"                    focal = float(focals[idx, 0]) * scale\n",
"            else:\n",
"                focal = 1000.0\n",
"\n",
"            # Principal point, rescaled to the original resolution\n",
"            if pps is not None and idx < len(pps):\n",
"                pp_mast3r = pps[idx]\n",
"                if isinstance(pp_mast3r, torch.Tensor):\n",
"                    pp_mast3r = pp_mast3r.detach().cpu().numpy()\n",
"\n",
"                # 🔧 Apply the scale factor\n",
"                pp = pp_mast3r * scale\n",
"            else:\n",
"                pp = np.array([W / 2.0, H / 2.0])\n",
"\n",
"            # Store the camera parameters\n",
"            cameras_dict[img_name] = {\n",
"                'focal': focal,\n",
"                'pp': pp,\n",
"                'pose': pose,\n",
"                'rotation': pose[:3, :3],\n",
"                'translation': pose[:3, 3],\n",
"                'width': W,\n",
"                'height': H\n",
"            }\n",
"\n",
"            # Debug info (first image only)\n",
"            if idx == 0:\n",
"                print(f\"\\nExample camera 0:\")\n",
"                print(f\"  Image size: {W}x{H}\")\n",
"                print(f\"  MASt3R size: {mast3r_size}\")\n",
"                print(f\"  Scale factor: {scale:.3f}\")\n",
"                print(f\"  MASt3R focal: {focal_mast3r:.2f}\")\n",
"                print(f\"  Scaled focal: {focal:.2f}\")\n",
"                print(f\"  MASt3R pp: [{pp_mast3r[0]:.2f}, {pp_mast3r[1]:.2f}]\")\n",
"                print(f\"  Scaled pp: [{pp[0]:.2f}, {pp[1]:.2f}]\")\n",
"\n",
"            # 3D points\n",
"            if hasattr(scene, 'im_pts3d') and idx < len(scene.im_pts3d):\n",
"                pts3d_img = scene.im_pts3d[idx]\n",
"            elif hasattr(scene, 'get_pts3d'):\n",
"                pts3d_all = scene.get_pts3d()\n",
"                if idx < len(pts3d_all):\n",
"                    pts3d_img = pts3d_all[idx]\n",
"                else:\n",
"                    pts3d_img = None\n",
"            else:\n",
"                pts3d_img = None\n",
"\n",
"            # Confidence\n",
"            if hasattr(scene, 'im_conf') and idx < len(scene.im_conf):\n",
"                conf_img = scene.im_conf[idx]\n",
"            elif hasattr(scene, 'get_conf'):\n",
"                conf_all = scene.get_conf()\n",
"                if idx < len(conf_all):\n",
"                    conf_img = conf_all[idx]\n",
"                else:\n",
"                    conf_img = None\n",
"            else:\n",
"                conf_img = None\n",
"\n",
"            # Flatten the 3D points and matching confidences\n",
"            if pts3d_img is not None:\n",
"                if isinstance(pts3d_img, torch.Tensor):\n",
"                    pts3d_img = pts3d_img.detach().cpu().numpy()\n",
"\n",
"                if pts3d_img.ndim == 3:\n",
"                    pts3d_flat = pts3d_img.reshape(-1, 3)\n",
"                else:\n",
"                    pts3d_flat = pts3d_img\n",
"\n",
"                all_pts3d.append(pts3d_flat)\n",
"\n",
"                if conf_img is not None:\n",
"                    if isinstance(conf_img, list):\n",
"                        conf_img = np.array(conf_img)\n",
"                    elif isinstance(conf_img, torch.Tensor):\n",
"                        conf_img = conf_img.detach().cpu().numpy()\n",
"\n",
"                    if conf_img.ndim > 1:\n",
"                        conf_flat = conf_img.reshape(-1)\n",
"                    else:\n",
"                        conf_flat = conf_img\n",
"\n",
"                    if len(conf_flat) != len(pts3d_flat):\n",
"                        conf_flat = np.ones(len(pts3d_flat))\n",
"\n",
"                    all_confidence.append(conf_flat)\n",
"                else:\n",
"                    all_confidence.append(np.ones(len(pts3d_flat)))\n",
"\n",
"        except Exception as e:\n",
"            print(f\"⚠️ Error processing image {idx} ({img_name}): {e}\")\n",
"            # Even the fallback camera gets the intrinsics rescaling\n",
"            img = Image.open(image_paths[idx])\n",
"            W, H = img.size\n",
"            img.close()\n",
"\n",
"            cameras_dict[img_name] = {\n",
"                'focal': 1000.0 * (W / mast3r_size),\n",
"                'pp': np.array([W / 2.0, H / 2.0]),\n",
"                'pose': np.eye(4),\n",
"                'rotation': np.eye(3),\n",
"                'translation': np.zeros(3),\n",
"                'width': W,\n",
"                'height': H\n",
"            }\n",
"            continue\n",
"\n",
"    # Concatenate the 3D points from all images\n",
"    if all_pts3d:\n",
"        pts3d = np.vstack(all_pts3d)\n",
"        confidence = np.concatenate(all_confidence)\n",
"    else:\n",
"        pts3d = np.zeros((0, 3))\n",
"        confidence = np.zeros(0)\n",
"\n",
"    print(f\"✓ Extracted camera parameters for {len(cameras_dict)} cameras\")\n",
"    print(f\"✓ Total 3D points: {len(pts3d)}\")\n",
"\n",
"    # Filter by confidence\n",
"    if len(confidence) > 0:\n",
"        valid_mask = confidence > conf_threshold\n",
"        pts3d = pts3d[valid_mask]\n",
"        confidence = confidence[valid_mask]\n",
"        print(f\"✓ After confidence filtering (>{conf_threshold}): {len(pts3d)} points\")\n",
"\n",
"    return cameras_dict, pts3d, confidence\n"
], "metadata": { "id": "YSt2RDqmviUa" }, "outputs": [], "execution_count": 81 }, { "cell_type": "code", "source": [
"# =====================================================================\n",
"# CELL 12: COLMAP Export Functions (PINHOLE model)\n",
"# =====================================================================\n",
"\n",
"import struct\n",
"import numpy as np\n",
"from pathlib import Path\n",
"\n",
"def rotmat_to_qvec(R):\n",
"    \"\"\"Convert a rotation matrix to a [w, x, y, z] quaternion\"\"\"\n",
"    R = np.asarray(R, dtype=np.float64)\n",
"    trace = np.trace(R)\n",
"\n",
"    if trace > 0:\n",
"        s = 0.5 / np.sqrt(trace + 1.0)\n",
"        w = 0.25 / s\n",
"        x = (R[2, 1] - R[1, 2]) * s\n",
"        y = (R[0, 2] - R[2, 0]) * s\n",
"        z = (R[1, 0] - R[0, 1]) * s\n",
"    elif R[0, 0] > R[1, 1] and R[0, 0] > R[2, 2]:\n",
"        s = 2.0 * np.sqrt(1.0 + R[0, 0] - R[1, 1] - R[2, 2])\n",
"        w = (R[2, 1] - R[1, 2]) / s\n",
"        x = 0.25 * s\n",
"        y = (R[0, 1] + R[1, 0]) / s\n",
"        z = (R[0, 2] + R[2, 0]) / s\n",
"    elif R[1, 1] > R[2, 2]:\n",
"        s = 2.0 * np.sqrt(1.0 + R[1, 1] - R[0, 0] - R[2, 2])\n",
"        w = (R[0, 2] - R[2, 0]) / s\n",
"        x = (R[0, 1] + R[1, 0]) / s\n",
"        y = 0.25 * s\n",
"        z = (R[1, 2] + R[2, 1]) / s\n",
"    else:\n",
"        s = 2.0 * np.sqrt(1.0 + R[2, 2] - R[0, 0] - R[1, 1])\n",
"        w = (R[1, 0] - R[0, 1]) / s\n",
"        x = (R[0, 2] + R[2, 0]) / s\n",
"        y = (R[1, 2] + R[2, 1]) / s\n",
"        z = 0.25 * s\n",
"\n",
"    qvec = np.array([w, x, y, z], dtype=np.float64)\n",
"    qvec = qvec / np.linalg.norm(qvec)\n",
"\n",
"    return qvec\n",
"\n",
"\n",
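"# Hedged in-cell check (editor's addition): the identity rotation maps to the\n",
"# identity quaternion [1, 0, 0, 0].\n",
"print(rotmat_to_qvec(np.eye(3)))   # -> [1. 0. 0. 0.]\n",
"\n",
"\n",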
"def write_cameras_binary(cameras_dict, image_size, output_file):\n",
"    \"\"\"\n",
"    Write cameras.bin (PINHOLE camera model)\n",
"    \"\"\"\n",
"    num_cameras = len(cameras_dict)\n",
"\n",
"    # COLMAP camera models\n",
"    PINHOLE = 1  # 🔧 changed from SIMPLE_PINHOLE (0) to PINHOLE (1)\n",
"\n",
"    with open(output_file, 'wb') as f:\n",
"        f.write(struct.pack('Q', num_cameras))\n",
"\n",
"        for camera_id, (img_id, cam_params) in enumerate(cameras_dict.items(), start=1):\n",
"            focal = cam_params['focal']\n",
"\n",
"            # PINHOLE params: fx, fy, cx, cy\n",
"            fx = fy = focal  # assume an isotropic camera\n",
"\n",
"            # Per-camera size if stored, otherwise the shared image_size\n",
"            width = int(cam_params.get('width', image_size[0]))\n",
"            height = int(cam_params.get('height', image_size[1]))\n",
"\n",
"            # Principal point (image centre if missing)\n",
"            if 'pp' in cam_params:\n",
"                pp = cam_params['pp']\n",
"                cx = float(pp[0])\n",
"                cy = float(pp[1])\n",
"            else:\n",
"                cx = width / 2.0\n",
"                cy = height / 2.0\n",
"\n",
"            # camera_id\n",
"            f.write(struct.pack('I', camera_id))\n",
"            # model_id (PINHOLE = 1)\n",
"            f.write(struct.pack('i', PINHOLE))\n",
"            # width\n",
"            f.write(struct.pack('Q', width))\n",
"            # height\n",
"            f.write(struct.pack('Q', height))\n",
"            # params: fx, fy, cx, cy (4 doubles)\n",
"            f.write(struct.pack('d', fx))\n",
"            f.write(struct.pack('d', fy))\n",
"            f.write(struct.pack('d', cx))\n",
"            f.write(struct.pack('d', cy))\n",
"\n",
"    print(f\"COLMAP cameras.bin saved to {output_file}\")\n",
"\n",
"\n",
"def write_images_binary(cameras_dict, output_file):\n",
"    \"\"\"Write images.bin\"\"\"\n",
"    num_images = len(cameras_dict)\n",
"\n",
"    with open(output_file, 'wb') as f:\n",
"        f.write(struct.pack('Q', num_images))\n",
"\n",
"        for image_id, (img_id, cam_params) in enumerate(cameras_dict.items(), start=1):\n",
"            R = cam_params['rotation']\n",
"            quat = rotmat_to_qvec(R)\n",
"            t = cam_params['translation']\n",
"            camera_id = image_id\n",
"\n",
"            f.write(struct.pack('I', image_id))\n",
"            for q in quat:\n",
"                f.write(struct.pack('d', q))\n",
"            for ti in t:\n",
"                f.write(struct.pack('d', ti))\n",
"            f.write(struct.pack('I', camera_id))\n",
"\n",
"            name_bytes = img_id.encode('utf-8') + b'\\x00'\n",
"            f.write(name_bytes)\n",
"            # num_points2D = 0 (no 2D tracks exported)\n",
"            f.write(struct.pack('Q', 0))\n",
"\n",
"    print(f\"COLMAP images.bin saved to {output_file}\")\n",
"\n",
"\n",
"def write_points3D_binary(pts3d, confidence, output_file):\n",
"    \"\"\"Write points3D.bin\"\"\"\n",
"    num_points = len(pts3d)\n",
"\n",
"    with open(output_file, 'wb') as f:\n",
"        f.write(struct.pack('Q', num_points))\n",
"\n",
"        for point_id, pt in enumerate(pts3d, start=1):\n",
"            x, y, z = pt\n",
"\n",
"            f.write(struct.pack('Q', point_id))\n",
"            f.write(struct.pack('d', x))\n",
"            f.write(struct.pack('d', y))\n",
"            f.write(struct.pack('d', z))\n",
"\n",
"            # RGB (grey)\n",
"            f.write(struct.pack('B', 128))\n",
"            f.write(struct.pack('B', 128))\n",
"            f.write(struct.pack('B', 128))\n",
"\n",
"            # Reprojection-error surrogate: inverse confidence\n",
"            if confidence is not None and point_id <= len(confidence):\n",
"                error = 1.0 / max(confidence[point_id-1], 0.001)\n",
"            else:\n",
"                error = 1.0\n",
"            f.write(struct.pack('d', error))\n",
"\n",
"            # track length (no tracks exported)\n",
"            f.write(struct.pack('Q', 0))\n",
"\n",
"    print(f\"COLMAP points3D.bin saved to {output_file}\")\n",
"\n",
"\n",
"def export_colmap_binary(cameras_dict, pts3d, confidence, image_size, output_dir):\n",
"    \"\"\"Write the COLMAP binary files\"\"\"\n",
"    output_path = Path(output_dir)\n",
"    output_path.mkdir(parents=True, exist_ok=True)\n",
"\n",
"    write_cameras_binary(\n",
"        cameras_dict,\n",
"        image_size,\n",
"        output_path / 'cameras.bin'\n",
"    )\n",
"\n",
"    write_images_binary(\n",
"        cameras_dict,\n",
"        output_path / 'images.bin'\n",
"    )\n",
"\n",
"    write_points3D_binary(\n",
"        pts3d,\n",
"        confidence,\n",
"        output_path / 'points3D.bin'\n",
"    )\n",
"\n",
"    print(f\"\\nCOLMAP binary files exported to {output_dir}/\")\n",
"    print(f\"  - cameras.bin: {len(cameras_dict)} cameras (PINHOLE model)\")\n",
"    print(f\"  - images.bin: {len(cameras_dict)} images\")\n",
"    print(f\"  - points3D.bin: {len(pts3d)} points\")"
], "metadata": { "id": "jNk5C0k1zkLD" }, "outputs": [], "execution_count": 82 }, { "cell_type": "code", "source": [], "metadata": { "id": "gDbmwRKsEkYi" }, "outputs": [], "execution_count": 82 }, { "cell_type": "markdown", "source": [ "**comparison**" ], "metadata": { "id": "yGny_hJrElYL" } },
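{ "cell_type": "code", "source": [
"# Hedged sketch (editor's addition): the byte layout of one cameras.bin\n",
"# record as written above (PINHOLE model; the numeric values are arbitrary).\n",
"import struct\n",
"\n",
"rec = (struct.pack('I', 1)       # camera_id  (uint32)\n",
"       + struct.pack('i', 1)     # model_id   (int32, PINHOLE = 1)\n",
"       + struct.pack('Q', 1024)  # width      (uint64)\n",
"       + struct.pack('Q', 1024)  # height     (uint64)\n",
"       + b''.join(struct.pack('d', v) for v in (1200.0, 1200.0, 512.0, 512.0)))\n",
"print(len(rec))                  # 4 + 4 + 8 + 8 + 4*8 = 56 bytes per camera\n"
], "metadata": { "id": "hedged05" }, "outputs": [], "execution_count": null },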
{ "cell_type": "code", "source": [
"# =====================================================================\n",
"# CELL 20: Traditional Method Functions (for comparison)\n",
"# =====================================================================\n",
"import struct\n",
"import numpy as np\n",
"from pathlib import Path\n",
"\n",
"# ===== Traditional: extract_colmap_data =====\n",
"def extract_colmap_data_traditional(scene, image_paths, max_points=1000000):\n",
"    \"\"\"\n",
"    Traditional method: extract COLMAP-compatible data from a MASt3R scene\n",
"    (lifted from dino-mast3r-gs-kg-34oo.ipynb)\n",
"    \"\"\"\n",
"    print(\"\\n=== [TRADITIONAL] Extracting COLMAP-compatible data ===\")\n",
"\n",
"    # Extract point cloud\n",
"    pts_all = scene.get_pts3d()\n",
"    print(f\"pts_all type: {type(pts_all)}\")\n",
"\n",
"    if isinstance(pts_all, list):\n",
"        print(f\"pts_all is a list with {len(pts_all)} elements\")\n",
"        if len(pts_all) > 0:\n",
"            print(f\"First element type: {type(pts_all[0])}\")\n",
"            if hasattr(pts_all[0], 'shape'):\n",
"                print(f\"First element shape: {pts_all[0].shape}\")\n",
"\n",
"        pts_all = torch.stack([p if isinstance(p, torch.Tensor) else torch.tensor(p)\n",
"                               for p in pts_all])\n",
"        print(f\"pts_all shape after conversion: {pts_all.shape}\")\n",
"\n",
"    if len(pts_all.shape) == 4:\n",
"        print(f\"Found batched point cloud: {pts_all.shape}\")\n",
"        B, H, W, _ = pts_all.shape\n",
"        pts3d = pts_all.reshape(-1, 3).detach().cpu().numpy()\n",
"\n",
"        # Extract colors\n",
"        colors = []\n",
"        for img_path in image_paths:\n",
"            img = Image.open(img_path).resize((W, H))\n",
"            colors.append(np.array(img))\n",
"        colors = np.stack(colors).reshape(-1, 3) / 255.0\n",
"    else:\n",
"        pts3d = pts_all.detach().cpu().numpy() if isinstance(pts_all, torch.Tensor) else pts_all\n",
"        colors = np.ones((len(pts3d), 3)) * 0.5\n",
"\n",
"    print(f\"✓ Extracted {len(pts3d)} 3D points from {len(image_paths)} images\")\n",
"\n",
"    # Downsample points\n",
"    if len(pts3d) > max_points:\n",
"        print(f\"\\n⚠ Downsampling from {len(pts3d)} to {max_points} points...\")\n",
"        valid_mask = ~(np.isnan(pts3d).any(axis=1) | np.isinf(pts3d).any(axis=1))\n",
"        pts3d_valid = pts3d[valid_mask]\n",
"        colors_valid = colors[valid_mask]\n",
"        indices = np.random.choice(len(pts3d_valid), size=max_points, replace=False)\n",
"        pts3d = pts3d_valid[indices]\n",
"        colors = colors_valid[indices]\n",
"        print(f\"✓ Downsampled to {len(pts3d)} points\")\n",
"\n",
"    # Extract camera parameters\n",
"    print(\"Extracting camera parameters...\")\n",
"\n",
"    # [IMPORTANT] convert camera-to-world to world-to-camera\n",
"    poses_c2w = scene.get_im_poses().detach().cpu().numpy()\n",
"    print(f\"Retrieved camera-to-world poses: shape {poses_c2w.shape}\")\n",
"\n",
"    poses = []\n",
"    for i, pose_c2w in enumerate(poses_c2w):\n",
"        pose_w2c = np.linalg.inv(pose_c2w)\n",
"        poses.append(pose_w2c)\n",
"    poses = np.array(poses)\n",
"    print(f\"Converted to world-to-camera poses for COLMAP\")\n",
"\n",
"    focals = scene.get_focals().detach().cpu().numpy()\n",
"    pp = scene.get_principal_points().detach().cpu().numpy()\n",
"    print(f\"Focals shape: {focals.shape}\")\n",
"    print(f\"Principal points shape: {pp.shape}\")\n",
"\n",
"    mast3r_size = 224.0\n",
"\n",
"    cameras = []\n",
"    for i, img_path in enumerate(image_paths):\n",
"        img = Image.open(img_path)\n",
"        W, H = img.size\n",
"        scale = W / mast3r_size\n",
"\n",
"        if focals.shape[1] == 1:\n",
"            focal_mast3r = float(focals[i, 0])\n",
"            fx = fy = focal_mast3r * scale\n",
"        else:\n",
"            # Fix: also define focal_mast3r here so the debug print below works\n",
"            focal_mast3r = float(focals[i, 0])\n",
"            fx = focal_mast3r * scale\n",
"            fy = float(focals[i, 1]) * scale\n",
"\n",
"        cx = float(pp[i, 0]) * scale\n",
"        cy = float(pp[i, 1]) * scale\n",
"\n",
"        camera = {\n",
"            'camera_id': i + 1,\n",
"            'model': 'PINHOLE',\n",
"            'width': W,\n",
"            'height': H,\n",
"            'params': [fx, fy, cx, cy]\n",
"        }\n",
"        cameras.append(camera)\n",
"\n",
"        if i == 0:\n",
"            print(f\"\\nExample camera 0:\")\n",
"            print(f\"  Image size: {W}x{H}\")\n",
"            print(f\"  MASt3R focal: {focal_mast3r:.2f}, pp: ({pp[i,0]:.2f}, {pp[i,1]:.2f})\")\n",
"            print(f\"  Scaled fx={fx:.2f}, fy={fy:.2f}, cx={cx:.2f}, cy={cy:.2f}\")\n",
"            print(f\"  Pose (first row): {poses[i][0]}\")\n",
"\n",
"    print(f\"\\n✓ Extracted {len(cameras)} cameras and {len(poses)} poses\")\n",
"\n",
"    return pts3d, colors, cameras, poses\n",
"\n",
"\n",
"# ===== Traditional: rotmat2qvec =====\n",
"def rotmat2qvec_traditional(R):\n",
"    \"\"\"Traditional method: convert a rotation matrix to a quaternion\"\"\"\n",
"    R = np.asarray(R, dtype=np.float64)\n",
"    trace = np.trace(R)\n",
"\n",
"    if trace > 0:\n",
"        s = 0.5 / np.sqrt(trace + 1.0)\n",
"        w = 0.25 / s\n",
"        x = (R[2, 1] - R[1, 2]) * s\n",
"        y = (R[0, 2] - R[2, 0]) * s\n",
"        z = (R[1, 0] - R[0, 1]) * s\n",
"    elif R[0, 0] > R[1, 1] and R[0, 0] > R[2, 2]:\n",
"        s = 2.0 * np.sqrt(1.0 + R[0, 0] - R[1, 1] - R[2, 2])\n",
"        w = (R[2, 1] - R[1, 2]) / s\n",
"        x = 0.25 * s\n",
"        y = (R[0, 1] + R[1, 0]) / s\n",
"        z = (R[0, 2] + R[2, 0]) / s\n",
"    elif R[1, 1] > R[2, 2]:\n",
"        s = 2.0 * np.sqrt(1.0 + R[1, 1] - R[0, 0] - R[2, 2])\n",
"        w = (R[0, 2] - R[2, 0]) / s\n",
"        x = (R[0, 1] + R[1, 0]) / s\n",
"        y = 0.25 * s\n",
"        z = (R[1, 2] + R[2, 1]) / s\n",
"    else:\n",
"        s = 2.0 * np.sqrt(1.0 + R[2, 2] - R[0, 0] - R[1, 1])\n",
"        w = (R[1, 0] - R[0, 1]) / s\n",
"        x = (R[0, 2] + R[2, 0]) / s\n",
"        y = (R[1, 2] + R[2, 1]) / s\n",
"        z = 0.25 * s\n",
"\n",
"    qvec = np.array([w, x, y, z], dtype=np.float64)\n",
"    qvec = qvec / np.linalg.norm(qvec)\n",
"\n",
"    return qvec\n",
"\n",
"\n",
"# ===== Traditional: writers =====\n",
"def write_cameras_binary_traditional(cameras, output_file):\n",
"    \"\"\"Traditional method: write cameras.bin\"\"\"\n",
"    with open(output_file, 'wb') as f:\n",
"        f.write(struct.pack('Q', len(cameras)))\n",
"\n",
"        for i, cam in enumerate(cameras):\n",
"            camera_id = cam.get('camera_id', i + 1)\n",
"            model_id = 1  # PINHOLE\n",
"            width = cam['width']\n",
"            height = cam['height']\n",
"            params = cam['params']\n",
"\n",
"            f.write(struct.pack('i', camera_id))\n",
"            f.write(struct.pack('i', model_id))\n",
"            f.write(struct.pack('Q', width))\n",
"            f.write(struct.pack('Q', height))\n",
"\n",
"            for param in params[:4]:\n",
"                f.write(struct.pack('d', param))\n",
"\n",
"\n",
"def write_images_binary_traditional(image_paths, cameras, poses, output_file):\n",
"    \"\"\"Traditional method: write images.bin\"\"\"\n",
"    with open(output_file, 'wb') as f:\n",
"        f.write(struct.pack('Q', len(image_paths)))\n",
"\n",
"        for i, (img_path, pose) in enumerate(zip(image_paths, poses)):\n",
"            image_id = i + 1\n",
"            camera_id = cameras[i].get('camera_id', i + 1)\n",
"            image_name = os.path.basename(img_path)\n",
"\n",
"            R = pose[:3, :3]\n",
"            t = pose[:3, 3]\n",
"            qvec = rotmat2qvec_traditional(R)\n",
"            tvec = t\n",
"\n",
"            f.write(struct.pack('i', image_id))\n",
"            for q in qvec:\n",
"                f.write(struct.pack('d', float(q)))\n",
"            for tv in tvec:\n",
"                f.write(struct.pack('d', float(tv)))\n",
"            f.write(struct.pack('i', camera_id))\n",
"            f.write(image_name.encode('utf-8') + b'\\x00')\n",
"            f.write(struct.pack('Q', 0))\n",
"\n",
"\n",
"def write_points3d_binary_traditional(pts3d, colors, output_file):\n",
"    \"\"\"Traditional method: write points3D.bin\"\"\"\n",
"    valid_indices = []\n",
"    for i, pt in enumerate(pts3d):\n",
"        if not (np.isnan(pt).any() or np.isinf(pt).any()):\n",
"            valid_indices.append(i)\n",
"\n",
"    with open(output_file, 'wb') as f:\n",
"        f.write(struct.pack('Q', len(valid_indices)))\n",
"\n",
"        for idx, point_id in enumerate(valid_indices):\n",
"            pt = pts3d[point_id]\n",
"            color = colors[point_id]\n",
"\n",
"            f.write(struct.pack('Q', point_id))\n",
"            for coord in pt:\n",
"                f.write(struct.pack('d', float(coord)))\n",
"\n",
"            col_int = (color * 255).astype(np.uint8)\n",
"            for c in col_int:\n",
"                f.write(struct.pack('B', int(c)))\n",
"\n",
"            f.write(struct.pack('d', 0.0))\n",
"            f.write(struct.pack('Q', 0))\n",
"\n",
"    return len(valid_indices)\n",
"\n",
"\n",
"def save_colmap_reconstruction_traditional(pts3d, colors, cameras, poses, image_paths, output_dir):\n",
"    \"\"\"Traditional method: save the COLMAP reconstruction\"\"\"\n",
"    print(\"\\n=== [TRADITIONAL] Saving COLMAP reconstruction ===\")\n",
"\n",
"    sparse_dir = Path(output_dir) / 'sparse_traditional' / '0'\n",
"    sparse_dir.mkdir(parents=True, exist_ok=True)\n",
"\n",
"    write_cameras_binary_traditional(cameras, sparse_dir / 'cameras.bin')\n",
"    print(f\"  ✓ Wrote {len(cameras)} cameras\")\n",
"\n",
"    write_images_binary_traditional(image_paths, cameras, poses, sparse_dir / 'images.bin')\n",
"    print(f\"  ✓ Wrote {len(image_paths)} images\")\n",
"\n",
"    num_points = write_points3d_binary_traditional(pts3d, colors, sparse_dir / 'points3D.bin')\n",
"    print(f\"  ✓ Wrote {num_points} 3D points\")\n",
"\n",
"    print(f\"\\n✓ Traditional COLMAP reconstruction saved to {sparse_dir}\")\n",
"\n",
"    return sparse_dir"
], "metadata": { "id": "kIrrlZXQEkSA" }, "outputs": [], "execution_count": 83 },
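{ "cell_type": "code", "source": [
"# Hedged consistency check (editor's addition): the two quaternion converters\n",
"# defined above are the same algorithm, so they must agree on any rotation.\n",
"import numpy as np\n",
"\n",
"Rz = np.array([[0., -1., 0.],\n",
"               [1.,  0., 0.],\n",
"               [0.,  0., 1.]])   # 90-degree rotation about z\n",
"print(rotmat_to_qvec(Rz))              # ~ [0.7071 0. 0. 0.7071]\n",
"print(rotmat2qvec_traditional(Rz))     # identical output\n"
], "metadata": { "id": "hedged06" }, "outputs": [], "execution_count": null },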
{ "cell_type": "code", "source": [
"# =====================================================================\n",
"# CELL 21: Convert BIN to CSV for Easy Comparison\n",
"# =====================================================================\n",
"import pandas as pd\n",
"import struct\n",
"\n",
"def bin_to_csv_cameras(bin_file, csv_file):\n",
"    \"\"\"cameras.bin → CSV\"\"\"\n",
"    data = []\n",
"    with open(bin_file, 'rb') as f:\n",
"        num_cameras = struct.unpack('Q', f.read(8))[0]\n",
"        for _ in range(num_cameras):\n",
"            camera_id = struct.unpack('i', f.read(4))[0]\n",
"            model_id = struct.unpack('i', f.read(4))[0]\n",
"            width = struct.unpack('Q', f.read(8))[0]\n",
"            height = struct.unpack('Q', f.read(8))[0]\n",
"\n",
"            if model_id == 1:\n",
"                # PINHOLE: fx, fy, cx, cy\n",
"                params = struct.unpack('dddd', f.read(32))\n",
"                fx, fy, cx, cy = params\n",
"            else:\n",
"                # Assume SIMPLE_PINHOLE: f, cx, cy (so fx = fy = f)\n",
"                params = struct.unpack('ddd', f.read(24))\n",
"                fx = fy = params[0]\n",
"                cx, cy = params[1], params[2]\n",
"\n",
"            data.append({\n",
"                'camera_id': camera_id,\n",
"                'model_id': model_id,\n",
"                'width': width,\n",
"                'height': height,\n",
"                'fx': fx,\n",
"                'fy': fy,\n",
"                'cx': cx,\n",
"                'cy': cy\n",
"            })\n",
"\n",
"    df = pd.DataFrame(data)\n",
"    df.to_csv(csv_file, index=False)\n",
"    print(f\"✓ Cameras CSV saved: {csv_file}\")\n",
"    return df\n",
"\n",
"\n",
"def bin_to_csv_images(bin_file, csv_file):\n",
"    \"\"\"images.bin → CSV\"\"\"\n",
"    data = []\n",
"    with open(bin_file, 'rb') as f:\n",
"        num_images = struct.unpack('Q', f.read(8))[0]\n",
"        for _ in range(num_images):\n",
"            image_id = struct.unpack('i', f.read(4))[0]\n",
"            qvec = struct.unpack('dddd', f.read(32))\n",
"            tvec = struct.unpack('ddd', f.read(24))\n",
"            camera_id = struct.unpack('i', f.read(4))[0]\n",
"\n",
"            # Read the null-terminated image name\n",
"            name = b''\n",
"            while True:\n",
"                char = f.read(1)\n",
"                if char == b'\\x00':\n",
"                    break\n",
"                name += char\n",
"            name = name.decode('utf-8')\n",
"\n",
"            # Skip the 2D points (24 bytes each)\n",
"            num_points2D = struct.unpack('Q', f.read(8))[0]\n",
"            f.read(num_points2D * 24)\n",
"\n",
"            data.append({\n",
"                'image_id': image_id,\n",
"                'qw': qvec[0],\n",
"                'qx': qvec[1],\n",
"                'qy': qvec[2],\n",
"                'qz': qvec[3],\n",
"                'tx': tvec[0],\n",
"                'ty': tvec[1],\n",
"                'tz': tvec[2],\n",
"                'camera_id': camera_id,\n",
"                'name': name\n",
"            })\n",
"\n",
"    df = pd.DataFrame(data)\n",
"    df.to_csv(csv_file, index=False)\n",
"    print(f\"✓ Images CSV saved: {csv_file}\")\n",
"    return df\n",
"\n",
"\n",
"def bin_to_csv_points3d(bin_file, csv_file, max_rows=10000):\n",
"    \"\"\"points3D.bin → CSV (sampled)\"\"\"\n",
"    data = []\n",
"    with open(bin_file, 'rb') as f:\n",
"        num_points = struct.unpack('Q', f.read(8))[0]\n",
"\n",
"        # Sampling stride\n",
"        step = max(1, num_points // max_rows)\n",
"\n",
"        for i in range(num_points):\n",
"            point_id = struct.unpack('Q', f.read(8))[0]\n",
"            xyz = struct.unpack('ddd', f.read(24))\n",
"            rgb = struct.unpack('BBB', f.read(3))\n",
"            error = struct.unpack('d', f.read(8))[0]\n",
"            track_length = struct.unpack('Q', f.read(8))[0]\n",
"            f.read(track_length * 8)  # skip track elements\n",
"\n",
"            # Keep every step-th point\n",
"            if i % step == 0:\n",
"                data.append({\n",
"                    'point_id': point_id,\n",
"                    'x': xyz[0],\n",
"                    'y': xyz[1],\n",
"                    'z': xyz[2],\n",
"                    'r': rgb[0],\n",
"                    'g': rgb[1],\n",
"                    'b': rgb[2],\n",
"                    'error': error\n",
"                })\n",
"\n",
"    df = pd.DataFrame(data)\n",
"    df.to_csv(csv_file, index=False)\n",
"    print(f\"✓ Points3D CSV saved: {csv_file} (sampled {len(df)} / {num_points} points)\")\n",
"    return df\n",
"\n",
"\n",
"def convert_colmap_bins_to_csv(sparse_dir, output_prefix):\n",
"    \"\"\"Convert all three BIN files to CSV\"\"\"\n",
"    print(f\"\\n=== Converting {sparse_dir} to CSV ===\")\n",
"\n",
"    cameras_df = bin_to_csv_cameras(\n",
"        os.path.join(sparse_dir, 'cameras.bin'),\n",
"        f\"{output_prefix}_cameras.csv\"\n",
"    )\n",
"\n",
"    images_df = bin_to_csv_images(\n",
"        os.path.join(sparse_dir, 'images.bin'),\n",
"        f\"{output_prefix}_images.csv\"\n",
"    )\n",
"\n",
"    points_df = bin_to_csv_points3d(\n",
"        os.path.join(sparse_dir, 'points3D.bin'),\n",
"        f\"{output_prefix}_points3d.csv\",\n",
"        max_rows=10000\n",
"    )\n",
"\n",
"    return cameras_df, images_df, points_df"
], "metadata": { "id": "c7A05pXLFt2E" }, "outputs": [], "execution_count": 84 },
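{ "cell_type": "code", "source": [
"# Hedged round-trip sketch (editor's addition): write a single PINHOLE camera\n",
"# with write_cameras_binary and read it back with bin_to_csv_cameras.\n",
"# The camera values are arbitrary; a temporary directory keeps it self-contained.\n",
"import tempfile, os\n",
"import numpy as np\n",
"\n",
"tmp = tempfile.mkdtemp()\n",
"cams = {'img.jpg': {'focal': 1200.0, 'pp': np.array([512.0, 512.0])}}\n",
"write_cameras_binary(cams, (1024, 1024), os.path.join(tmp, 'cameras.bin'))\n",
"df = bin_to_csv_cameras(os.path.join(tmp, 'cameras.bin'), os.path.join(tmp, 'cameras.csv'))\n",
"print(df[['fx', 'cx']].iloc[0].to_dict())   # {'fx': 1200.0, 'cx': 512.0}\n"
], "metadata": { "id": "hedged07" }, "outputs": [], "execution_count": null },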
{ "cell_type": "code", "source": [
"# =====================================================================\n",
"# CELL 22: Comparison Function\n",
"# =====================================================================\n",
"\n",
"def compare_extraction_methods(scene, image_paths, output_dir, conf_threshold=0.5, max_points=500000):\n",
"    \"\"\"\n",
"    Export COLMAP-format data with both the new and the traditional method,\n",
"    then compare the two.\n",
"\n",
"    Args:\n",
"        scene: the MASt3R scene object\n",
"        image_paths: list of image paths\n",
"        output_dir: output directory\n",
"        conf_threshold: confidence threshold (new method)\n",
"        max_points: maximum number of points (traditional method)\n",
"\n",
"    Returns:\n",
"        dict: comparison results\n",
"    \"\"\"\n",
"    print(\"\\n\" + \"=\"*70)\n",
"    print(\"COMPARISON: New vs Traditional Extraction Methods\")\n",
"    print(\"=\"*70)\n",
"\n",
"    # ===== METHOD 1: new (extract_camera_params_process2) =====\n",
"    print(\"\\n--- METHOD 1: Current Implementation (extract_camera_params_process2) ---\")\n",
"\n",
"    cameras_dict_new, pts3d_new, confidence_new = extract_camera_params_process2(\n",
"        scene=scene,\n",
"        image_paths=image_paths,\n",
"        conf_threshold=conf_threshold\n",
"    )\n",
"\n",
"    # Image size from the first image\n",
"    first_img = Image.open(image_paths[0])\n",
"    image_size = (first_img.width, first_img.height)\n",
"    first_img.close()\n",
"\n",
"    # Save the new method's BIN files\n",
"    sparse_dir_new = os.path.join(output_dir, \"sparse_new/0\")\n",
"    os.makedirs(sparse_dir_new, exist_ok=True)\n",
"\n",
"    export_colmap_binary(\n",
"        cameras_dict=cameras_dict_new,\n",
"        pts3d=pts3d_new,\n",
"        confidence=confidence_new,\n",
"        image_size=image_size,\n",
"        output_dir=sparse_dir_new\n",
"    )\n",
"\n",
"    # ===== METHOD 2: traditional (extract_colmap_data_traditional) =====\n",
"    print(\"\\n--- METHOD 2: Traditional Implementation (extract_colmap_data) ---\")\n",
"\n",
"    pts3d_trad, colors_trad, cameras_trad, poses_trad = extract_colmap_data_traditional(\n",
"        scene=scene,\n",
"        image_paths=image_paths,\n",
"        max_points=max_points\n",
"    )\n",
"\n",
"    # Save the traditional method's BIN files\n",
"    sparse_dir_trad = save_colmap_reconstruction_traditional(\n",
"        pts3d=pts3d_trad,\n",
"        colors=colors_trad,\n",
"        cameras=cameras_trad,\n",
"        poses=poses_trad,\n",
"        image_paths=image_paths,\n",
"        output_dir=output_dir\n",
"    )\n",
"\n",
"    # ===== Convert to CSV =====\n",
"    print(\"\\n\" + \"=\"*70)\n",
"    print(\"Converting to CSV for comparison\")\n",
"    print(\"=\"*70)\n",
"\n",
"    csv_prefix_new = os.path.join(output_dir, \"comparison_new\")\n",
"    csv_prefix_trad = os.path.join(output_dir, \"comparison_traditional\")\n",
"\n",
"    cam_new, img_new, pts_new = convert_colmap_bins_to_csv(\n",
"        sparse_dir_new,\n",
"        csv_prefix_new\n",
"    )\n",
"\n",
"    cam_trad, img_trad, pts_trad = convert_colmap_bins_to_csv(\n",
"        str(sparse_dir_trad),\n",
"        csv_prefix_trad\n",
"    )\n",
"\n",
"    # ===== Comparison summary =====\n",
"    print(\"\\n\" + \"=\"*70)\n",
"    print(\"COMPARISON SUMMARY\")\n",
"    print(\"=\"*70)\n",
"\n",
"    comparison_results = {\n",
"        'cameras': {\n",
"            'new_count': len(cam_new),\n",
"            'trad_count': len(cam_trad),\n",
"            'new_focal': float(cam_new.iloc[0]['fx']) if len(cam_new) > 0 else None,\n",
"            'trad_focal': float(cam_trad.iloc[0]['fx']) if len(cam_trad) > 0 else None,\n",
"        },\n",
"        'images': {\n",
"            'new_count': len(img_new),\n",
"            'trad_count': len(img_trad),\n",
"            'new_tvec': [float(img_new.iloc[0]['tx']), float(img_new.iloc[0]['ty']), float(img_new.iloc[0]['tz'])] if len(img_new) > 0 else None,\n",
"            'trad_tvec': [float(img_trad.iloc[0]['tx']), float(img_trad.iloc[0]['ty']), float(img_trad.iloc[0]['tz'])] if len(img_trad) > 0 else None,\n",
"        },\n",
"        'points': {\n",
"            'new_count': len(pts_new),\n",
"            'trad_count': len(pts_trad),\n",
"            'new_center': [float(pts_new['x'].mean()), float(pts_new['y'].mean()), float(pts_new['z'].mean())] if len(pts_new) > 0 else None,\n",
"            'trad_center': [float(pts_trad['x'].mean()), float(pts_trad['y'].mean()), float(pts_trad['z'].mean())] if len(pts_trad) > 0 else None,\n",
"        }\n",
"    }\n",
"\n",
"    # Print the results\n",
"    print(\"\\nCAMERAS:\")\n",
"    print(f\"  New method: {comparison_results['cameras']['new_count']} cameras\")\n",
"    print(f\"  Traditional method: {comparison_results['cameras']['trad_count']} cameras\")\n",
"    if comparison_results['cameras']['new_focal'] and comparison_results['cameras']['trad_focal']:\n",
"        print(f\"\\n  Sample focal lengths:\")\n",
"        print(f\"    New: fx={comparison_results['cameras']['new_focal']:.2f}\")\n",
"        print(f\"    Traditional: fx={comparison_results['cameras']['trad_focal']:.2f}\")\n",
"        focal_diff = abs(comparison_results['cameras']['new_focal'] - comparison_results['cameras']['trad_focal'])\n",
"        print(f\"    Difference: {focal_diff:.2f}\")\n",
"\n",
"    print(\"\\nIMAGES:\")\n",
"    print(f\"  New method: {comparison_results['images']['new_count']} images\")\n",
"    print(f\"  Traditional method: {comparison_results['images']['trad_count']} images\")\n",
"    if comparison_results['images']['new_tvec'] and comparison_results['images']['trad_tvec']:\n",
"        print(f\"\\n  Sample translation (first image):\")\n",
"        print(f\"    New: {comparison_results['images']['new_tvec']}\")\n",
"        print(f\"    Traditional: {comparison_results['images']['trad_tvec']}\")\n",
"        tvec_diff = np.linalg.norm(\n",
"            np.array(comparison_results['images']['new_tvec']) -\n",
"            np.array(comparison_results['images']['trad_tvec'])\n",
"        )\n",
"        print(f\"    Distance: {tvec_diff:.3f}\")\n",
"\n",
"    print(\"\\nPOINTS3D:\")\n",
"    print(f\"  New method: {comparison_results['points']['new_count']} points (sampled)\")\n",
"    print(f\"  Traditional method: {comparison_results['points']['trad_count']} points (sampled)\")\n",
"    if comparison_results['points']['new_center'] and comparison_results['points']['trad_center']:\n",
"        print(f\"\\n  Center of points:\")\n",
"        print(f\"    New: {comparison_results['points']['new_center']}\")\n",
"        print(f\"    Traditional: {comparison_results['points']['trad_center']}\")\n",
"        center_diff = np.linalg.norm(\n",
"            np.array(comparison_results['points']['new_center']) -\n",
"            np.array(comparison_results['points']['trad_center'])\n",
"        )\n",
"        print(f\"    Distance: {center_diff:.3f}\")\n",
"\n",
"    print(\"\\n\" + \"=\"*70)\n",
"    print(\"CSV FILES SAVED:\")\n",
"    print(\"=\"*70)\n",
"    print(f\"  New method:\")\n",
"    print(f\"    - {csv_prefix_new}_cameras.csv\")\n",
"    print(f\"    - {csv_prefix_new}_images.csv\")\n",
"    print(f\"    - {csv_prefix_new}_points3d.csv\")\n",
"    print(f\"  Traditional method:\")\n",
"    print(f\"    - {csv_prefix_trad}_cameras.csv\")\n",
"    print(f\"    - {csv_prefix_trad}_images.csv\")\n",
"    print(f\"    - {csv_prefix_trad}_points3d.csv\")\n",
"\n",
"    print(\"\\n✓ Comparison complete! Review CSV files for detailed analysis.\")\n",
"\n",
"    return comparison_results"
], "metadata": { "id": "SN1a_CbWEkIg" }, "outputs": [], "execution_count": 85 },
{ "cell_type": "code", "source": [], "metadata": { "id": "KJPwr7rbE1kO" }, "outputs": [], "execution_count": 85 }, { "cell_type": "code", "source": [], "metadata": { "id": "E_ZQUzrhE1b_" }, "outputs": [], "execution_count": 85 }, { "cell_type": "code", "source": [
"# =====================================================================\n",
"# CELL 13: Save the initial point cloud as a PLY file\n",
"# =====================================================================\n",
"def save_initial_pointcloud_ply(pts3d, output_path):\n",
"    \"\"\"\n",
"    Save the MASt3R 3D points as an ASCII PLY file\n",
"    \"\"\"\n",
"    print(f\"\\nSaving initial point cloud to {output_path}\")\n",
"\n",
"    with open(output_path, 'w') as f:\n",
"        # PLY header\n",
"        f.write(\"ply\\n\")\n",
"        f.write(\"format ascii 1.0\\n\")\n",
"        f.write(f\"element vertex {len(pts3d)}\\n\")\n",
"        f.write(\"property float x\\n\")\n",
"        f.write(\"property float y\\n\")\n",
"        f.write(\"property float z\\n\")\n",
"        f.write(\"property uchar red\\n\")\n",
"        f.write(\"property uchar green\\n\")\n",
"        f.write(\"property uchar blue\\n\")\n",
"        f.write(\"end_header\\n\")\n",
"\n",
"        # Vertex data (stored grey)\n",
"        for pt in pts3d:\n",
"            x, y, z = pt[0], pt[1], pt[2]\n",
"            f.write(f\"{x} {y} {z} 128 128 128\\n\")\n",
"\n",
"    print(f\"✓ Saved {len(pts3d)} points to {output_path}\")\n",
"    return output_path"
], "metadata": { "id": "lHdqGcsaDLfb" }, "outputs": [], "execution_count": 86 }, { "cell_type": "code", "source": [], "metadata": { "id": "3iOFnjrhDLV0" }, "outputs": [], "execution_count": 86 },
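{ "cell_type": "code", "source": [
"# Hedged sketch (editor's addition): save_initial_pointcloud_ply on five random\n",
"# toy points, written to a temporary file so nothing in the pipeline is touched.\n",
"import tempfile, os\n",
"import numpy as np\n",
"\n",
"tmp_ply = os.path.join(tempfile.mkdtemp(), \"toy.ply\")\n",
"save_initial_pointcloud_ply(np.random.rand(5, 3), tmp_ply)\n",
"print(open(tmp_ply).read().splitlines()[:3])   # ['ply', 'format ascii 1.0', 'element vertex 5']\n"
], "metadata": { "id": "hedged08" }, "outputs": [], "execution_count": null },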
{ "cell_type": "code", "source": [
"# =====================================================================\n",
"# CELL 15: Run Gaussian Splatting (fixed)\n",
"# =====================================================================\n",
"def run_gaussian_splatting(source_dir, output_dir, iterations=30000, ply_path=None):\n",
"    \"\"\"Run Gaussian Splatting training\"\"\"\n",
"    import subprocess\n",
"    import sys\n",
"\n",
"    print(\"\\n=== Running Gaussian Splatting ===\")\n",
"\n",
"    # Make sure plyfile is installed\n",
"    print(\"Ensuring plyfile is installed...\")\n",
"    subprocess.run([sys.executable, \"-m\", \"pip\", \"install\", \"plyfile\", \"-q\"], check=True)\n",
"\n",
"    gs_script = \"/content/gaussian-splatting/train.py\"\n",
"\n",
"    if not os.path.exists(gs_script):\n",
"        print(f\"✗ Gaussian Splatting script not found at {gs_script}\")\n",
"        return None\n",
"\n",
"    cmd = [\n",
"        sys.executable,\n",
"        gs_script,\n",
"        \"-s\", source_dir,\n",
"        \"-m\", output_dir,\n",
"        \"--iterations\", str(iterations),\n",
"        \"--eval\"\n",
"    ]\n",
"\n",
"    # 🔧 The --ply_path option was removed (train.py does not support it);\n",
"    # Gaussian Splatting initialises from the COLMAP data automatically.\n",
"\n",
"    print(f\"Command: {' '.join(cmd)}\")\n",
"    print(f\"  Source: {source_dir}\")\n",
"    print(f\"  Output: {output_dir}\")\n",
"    print(\"  Note: Using COLMAP data for initialization\")\n",
"\n",
"    try:\n",
"        result = subprocess.run(\n",
"            cmd,\n",
"            capture_output=True,\n",
"            text=True,\n",
"            check=False\n",
"        )\n",
"\n",
"        if result.returncode != 0:\n",
"            print(f\"\\n✗ Gaussian Splatting failed with return code {result.returncode}\")\n",
"            print(\"\\n--- STDOUT ---\")\n",
"            print(result.stdout)\n",
"            print(\"\\n--- STDERR ---\")\n",
"            print(result.stderr)\n",
"            return None\n",
"        else:\n",
"            print(\"✓ Gaussian Splatting completed successfully\")\n",
"            return output_dir\n",
"\n",
"    except Exception as e:\n",
"        print(f\"✗ Error running Gaussian Splatting: {e}\")\n",
"        return None"
], "metadata": { "id": "o0n2RL3Ep5_Y" }, "outputs": [], "execution_count": 87 }, { "cell_type": "code", "source": [
"# =====================================================================\n",
"# Fix for the Gaussian Splatting run cell\n",
"# Cause of the error: the --ply_path parameter is not supported\n",
"# Fix: place the point cloud as points3D.ply and run without the parameter\n",
"# =====================================================================\n",
"\n",
"def save_initial_pointcloud(pts3d, confidence, output_file):\n",
"    \"\"\"Save the initial point cloud in PLY format (via plyfile)\"\"\"\n",
"    print(f\"\\nSaving initial point cloud to {output_file}\")\n",
"\n",
"    try:\n",
"        from plyfile import PlyData, PlyElement\n",
"        import numpy as np\n",
"\n",
"        # Keep only finite points\n",
"        valid_mask = ~(np.isnan(pts3d).any(axis=1) | np.isinf(pts3d).any(axis=1))\n",
"        pts3d_valid = pts3d[valid_mask]\n",
"\n",
"        # Default colours (grey) and normals (+z)\n",
"        colors = np.ones((len(pts3d_valid), 3)) * 128\n",
"        normals = np.zeros((len(pts3d_valid), 3))\n",
"        normals[:, 2] = 1.0\n",
"\n",
"        # Build the PLY vertex array\n",
"        vertices = np.empty(len(pts3d_valid), dtype=[\n",
"            ('x', 'f4'), ('y', 'f4'), ('z', 'f4'),\n",
"            ('nx', 'f4'), ('ny', 'f4'), ('nz', 'f4'),\n",
"            ('red', 'u1'), ('green', 'u1'), ('blue', 'u1')\n",
"        ])\n",
"\n",
"        vertices['x'] = pts3d_valid[:, 0]\n",
"        vertices['y'] = pts3d_valid[:, 1]\n",
"        vertices['z'] = pts3d_valid[:, 2]\n",
"        vertices['nx'] = normals[:, 0]\n",
"        vertices['ny'] = normals[:, 1]\n",
"        vertices['nz'] = normals[:, 2]\n",
"        vertices['red'] = colors[:, 0].astype(np.uint8)\n",
"        vertices['green'] = colors[:, 1].astype(np.uint8)\n",
"        vertices['blue'] = colors[:, 2].astype(np.uint8)\n",
"\n",
"        el = PlyElement.describe(vertices, 'vertex')\n",
"        PlyData([el]).write(output_file)\n",
"\n",
"        print(f\"✓ Saved {len(pts3d_valid)} points to {output_file}\")\n",
"\n",
"    except Exception as e:\n",
"        print(f\"✗ Error saving PLY: {e}\")\n",
"\n",
"\n",
"def run_gaussian_splatting_fixed(output_dir, iterations=1000):\n",
"    \"\"\"\n",
"    Run Gaussian Splatting (fixed version).\n",
"\n",
"    Fixes:\n",
"    1. Copy input.ply to sparse/0/points3D.ply\n",
"    2. Drop the unsupported --ply_path parameter\n",
"    \"\"\"\n",
"    import subprocess\n",
"    import os\n",
"    from pathlib import Path\n",
"\n",
"    output_path = Path(output_dir)\n",
"    sparse_dir = output_path / \"sparse\" / \"0\"\n",
"    gs_output_dir = output_path / \"gaussian_splatting\"\n",
"    initial_ply = output_path / \"input.ply\"\n",
"\n",
"    print(\"=\" * 70)\n",
"    print(\"STEP 5: Running Gaussian Splatting (fixed)\")\n",
"    print(\"=\" * 70)\n",
"\n",
"    # 🔧 Fix 1: place the point cloud as points3D.ply\n",
"    target_ply = sparse_dir / \"points3D.ply\"\n",
"\n",
"    if initial_ply.exists():\n",
"        print(f\"\\n✓ Initial point cloud found: {initial_ply}\")\n",
"        print(f\"  Copying to: {target_ply}\")\n",
"\n",
"        import shutil\n",
"        shutil.copy(initial_ply, target_ply)\n",
"        print(f\"✓ Copied successfully\")\n",
"    else:\n",
"        print(f\"⚠️ Warning: Initial point cloud not found at {initial_ply}\")\n",
"        print(\"  Gaussian Splatting will initialize from COLMAP reconstruction\")\n",
"\n",
"    # 🔧 Fix 2: run without the --ply_path parameter\n",
"    gs_train_script = \"/content/gaussian-splatting/train.py\"\n",
"\n",
"    cmd = [\n",
"        \"/usr/bin/python3\",\n",
"        gs_train_script,\n",
"        \"-s\", str(output_path),\n",
"        \"-m\", str(gs_output_dir),\n",
"        \"--iterations\", str(iterations),\n",
"        \"--eval\"\n",
"    ]\n",
"\n",
"    print(\"\\n=== Running Gaussian Splatting ===\")\n",
"    print(f\"Command: {' '.join(cmd)}\")\n",
"    print(f\"  Source: {output_path}\")\n",
"    print(f\"  Output: {gs_output_dir}\")\n",
"\n",
"    try:\n",
"        result = subprocess.run(\n",
"            cmd,\n",
"            capture_output=True,\n",
"            text=True,\n",
"            check=False\n",
"        )\n",
"\n",
"        if result.returncode == 0:\n",
"            print(\"\\n✓ Gaussian Splatting completed successfully\")\n",
"\n",
"            # Check the output file\n",
"            final_ply = gs_output_dir / \"point_cloud\" / f\"iteration_{iterations}\" / \"point_cloud.ply\"\n",
"            if final_ply.exists():\n",
"                print(f\"✓ Final point cloud saved: {final_ply}\")\n",
"            else:\n",
"                print(f\"⚠️ Point cloud not found at: {final_ply}\")\n",
"\n",
"        else:\n",
"            print(f\"\\n✗ Gaussian Splatting failed with return code {result.returncode}\")\n",
"            print(\"\\n--- STDOUT ---\")\n",
"            print(result.stdout)\n",
"            print(\"\\n--- STDERR ---\")\n",
"            print(result.stderr)\n",
"\n",
"    except Exception as e:\n",
"        print(f\"\\n✗ Error running Gaussian Splatting: {e}\")\n",
"\n",
"\n",
"\n"
], "metadata": { "id": "pZn5HrJoLpNY" }, "execution_count": 91, "outputs": [] }, { "cell_type": "code", "source": [
"# =====================================================================\n",
"# Main pipeline (integrating the fixed pieces)\n",
"# =====================================================================\n",
"def main_pipeline():\n",
"    \"\"\"Run the complete pipeline\"\"\"\n",
"\n",
"    # Settings\n",
"    IMAGE_DIR = \"/content/drive/MyDrive/your_folder/fountain\"  # change as needed\n",
"    OUTPUT_DIR = \"/content/output\"\n",
"    MAX_IMAGES = 30\n",
"    MAX_PAIRS = 100\n",
"    GS_ITERATIONS = 1000\n",
"\n",
"    # Step 1: image preprocessing (biplet crops)\n",
"    print(\"\\n\" + \"=\" * 70)\n",
"    print(\"STEP 1: Biplet Image Preprocessing\")\n",
"    print(\"=\" * 70)\n",
"\n",
"    biplet_dir = normalize_image_sizes_biplet(IMAGE_DIR, size=1024)\n",
"\n",
"    # Step 2: load images\n",
"    print(\"\\n\" + \"=\" * 70)\n",
"    print(\"STEP 2: Load Images\")\n",
"    print(\"=\" * 70)\n",
"\n",
"    image_paths = load_images_from_directory(biplet_dir, max_images=MAX_IMAGES)\n",
"\n",
"    # Step 3: pair selection (DINO or ASMK)\n",
"    print(\"\\n\" + \"=\" * 70)\n",
"    print(\"STEP 3: Image Pair Selection\")\n",
"    print(\"=\" * 70)\n",
"\n",
"    # Use DINO (recommended)\n",
"    filtered_paths, pairs = get_image_pairs_dino(image_paths, max_pairs=MAX_PAIRS)\n",
"\n",
"    # Or use ASMK instead:\n",
"    # filtered_paths, pairs = get_image_pairs_asmk(image_paths, max_pairs=MAX_PAIRS)\n",
"\n",
"    # Step 4: MASt3R reconstruction\n",
"    print(\"\\n\" + \"=\" * 70)\n",
"    print(\"STEP 4: MASt3R Reconstruction\")\n",
"    print(\"=\" * 70)\n",
"\n",
"    model = load_mast3r_model(Config.DEVICE)\n",
"    scene, images = run_mast3r_pairs(\n",
"        model,\n",
"        filtered_paths,\n",
"        pairs,\n",
"        Config.DEVICE,\n",
"        batch_size=1,\n",
"        max_pairs=MAX_PAIRS\n",
"    )\n",
"\n",
"    # Step 4.5: export to COLMAP format\n",
"    print(\"\\n\" + \"=\" * 70)\n",
"    print(\"STEP 4.5: Export to COLMAP Format\")\n",
"    print(\"=\" * 70)\n",
"\n",
"    # Extract camera parameters and 3D points\n",
"    cameras_dict, pts3d, confidence = extract_camera_params_process2(\n",
"        scene,\n",
"        filtered_paths,\n",
"        conf_threshold=0.5\n",
"    )\n",
"\n",
"    # Image size (from the first image)\n",
"    img = Image.open(filtered_paths[0])\n",
"    image_size = img.size\n",
"    img.close()\n",
"\n",
"    # Export the COLMAP binary files\n",
"    sparse_output_dir = f\"{OUTPUT_DIR}/sparse/0\"\n",
"    os.makedirs(sparse_output_dir, exist_ok=True)\n",
"\n",
"    export_colmap_binary(\n",
"        cameras_dict,\n",
"        pts3d,\n",
"        confidence,\n",
"        image_size,\n",
"        sparse_output_dir\n",
"    )\n",
"\n",
"    # 🔧 Fix: save the initial point cloud\n",
"    print(\"\\n\" + \"=\" * 70)\n",
"    print(\"STEP 4.5: Saving Initial Point Cloud\")\n",
"    print(\"=\" * 70)\n",
"\n",
"    initial_ply_path = f\"{OUTPUT_DIR}/input.ply\"\n",
"    save_initial_pointcloud(pts3d, confidence, initial_ply_path)\n",
"\n",
"    # Copy the images (import hoisted out of the loop)\n",
"    import shutil\n",
"    images_dir = f\"{OUTPUT_DIR}/images\"\n",
"    os.makedirs(images_dir, exist_ok=True)\n",
"    for img_path in filtered_paths:\n",
"        shutil.copy(img_path, images_dir)\n",
"    print(f\"\\n✓ Copied {len(filtered_paths)} images to {images_dir}\")\n",
"\n",
"    # Release the model\n",
"    del model, scene\n",
"    clear_memory()\n",
"\n",
"    # Step 5: Gaussian Splatting (fixed)\n",
"    run_gaussian_splatting_fixed(OUTPUT_DIR, iterations=GS_ITERATIONS)\n",
"\n",
"    print(\"\\n\" + \"=\" * 70)\n",
"    print(\"PIPELINE COMPLETE\")\n",
"    print(\"=\" * 70)\n",
"\n",
"    # Check the output file\n",
"    final_ply = f\"{OUTPUT_DIR}/gaussian_splatting/point_cloud/iteration_{GS_ITERATIONS}/point_cloud.ply\"\n",
"    if os.path.exists(final_ply):\n",
"        print(f\"✓ Final point cloud: {final_ply}\")\n",
"    else:\n",
"        print(f\"⚠️ Point cloud not found at: {final_ply}\")\n",
"\n",
"    print(f\"\\nOutput directory structure:\")\n",
"    print(f\"  {OUTPUT_DIR}/\")\n",
"    print(f\"  ├── images/ (processed images)\")\n",
"    print(f\"  ├── input.ply (initial point cloud)\")\n",
"    print(f\"  ├── sparse/0/ (COLMAP data)\")\n",
"    print(f\"  │   ├── cameras.bin\")\n",
"    print(f\"  │   ├── images.bin\")\n",
"    print(f\"  │   ├── points3D.bin\")\n",
"    print(f\"  │   └── points3D.ply (for GS initialization)\")\n",
"    print(f\"  └── gaussian_splatting/ (GS output)\")\n",
"\n"
], "metadata": { "id": "GDRchxLONHlj" }, "execution_count": 94, "outputs": [] }, { "cell_type": "code", "source": [ "# =====================================================================\n", "# Run\n", "# =====================================================================\n", "if __name__ == \"__main__\":\n", "    main_pipeline()" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 1000, "referenced_widgets": [ "bf44c57e4723401c955d48649116565a", "ef6ea005765742188b1cd55fb4530ef9", "9167fbd8a70c4639b2f251343eb9a981", "ccb3263efe67459c87d818292682fd3f", "06d8bf25667c49aab29956b05feb0abb", "e9848f5bd529467fa99163655c7f754c", "d9e37e5619c04646b3d331b56a0e8745", "e11a930ed5164a35a385a636875a16f6", "eb7acaad8f554949a81496e0aad9ccd9", "0f856bd88fc645b0baaa4633fd0f58bd", 
"c779b806854d4a7cbd20eea5760a2efc", "b2cefd6f37f74514a4fd9f449102d61b", "ef2421d71fbf4fb2b453d87b7b9a907c", "a7e7034a318b4c9784d6aaebd36748c9", "806fa9c3a1c049f1adaa96bdc3271141", "2fb6f473b5234fadb9cb1d06207309a3", "52342bdb5b664bcbbbc1479fb8f30885", "c0e66b36c9d6473b98d2b4f546b39ffb", "f95c52cb012d4ba89580b8ed57aaa4b8", "b1fc0cb4fd794ee4bcc52a173dde94f0", "0861cfd48ac04e6da8b267de142f8580", "5f127d1dadd44aa28367423d1e692967", "1df1dd60ae4244678430258b4929ec41", "9ac5ee7dc2bb40748fa9fc90139a8eeb", "9e6232a28a474a299b7fce684cf48a36", "870ec434cc9346ada1bbb0ddcaa62f4c", "2979f087a4e549318b6024a20220d5d3", "6c5d3c940f3147cb916acc9cd6a7a5e1", "c16b9aef215342459cbac253f9b7b87e", "49ddba95962049e3b022a57bd7d49a19", "36e9a773b2ea47799f31917188c016d7", "702e4c196d10471da4230ee0c3c31a28", "8d61b0a13356489d9c279ff8b74f29ad" ] }, "id": "WpnQal9kNA3k", "outputId": "a2af7ddb-87ec-4681-ab5d-f05daf8132b2" }, "execution_count": 95, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "\n", "======================================================================\n", "STEP 1: Biplet Image Preprocessing\n", "======================================================================\n", "\n", "=== Generating Biplet Crops (1024x1024) ===\n" ] }, { "output_type": "stream", "name": "stderr", "text": [ "Creating biplets: 100%|██████████| 30/30 [00:05<00:00, 5.15it/s]\n" ] }, { "output_type": "stream", "name": "stdout", "text": [ "\n", "✓ Biplet generation complete:\n", " Source images: 30\n", " Biplet crops generated: 60\n", " Original size distribution: {'1440x1920': 30}\n", "\n", "======================================================================\n", "STEP 2: Load Images\n", "======================================================================\n", "\n", "Loading images from: /content/drive/MyDrive/your_folder/fountain_biplet\n", "⚠️ Limiting from 60 to 30 images\n", "✓ Found 30 images\n", "\n", "======================================================================\n", "STEP 3: Image Pair Selection\n", "======================================================================\n", "\n", "=== Extracting DINO Global Features ===\n", "Initial memory state:\n", "GPU Memory - Allocated: 2.15GB, Reserved: 2.67GB\n", "CPU Memory Usage: 56.0%\n" ] }, { "output_type": "display_data", "data": { "text/plain": [ "preprocessor_config.json: 0%| | 0.00/436 [00:00> Loading a list of 30 images\n", " - adding /content/drive/MyDrive/your_folder/fountain_biplet/image_001_bottom.jpeg with resolution 1024x1024 --> 224x224\n", " - adding /content/drive/MyDrive/your_folder/fountain_biplet/image_001_top.jpeg with resolution 1024x1024 --> 224x224\n", " - adding /content/drive/MyDrive/your_folder/fountain_biplet/image_002_bottom.jpeg with resolution 1024x1024 --> 224x224\n", " - adding /content/drive/MyDrive/your_folder/fountain_biplet/image_002_top.jpeg with resolution 1024x1024 --> 224x224\n", " - adding /content/drive/MyDrive/your_folder/fountain_biplet/image_003_bottom.jpeg with resolution 1024x1024 --> 224x224\n", " - adding /content/drive/MyDrive/your_folder/fountain_biplet/image_003_top.jpeg with resolution 1024x1024 --> 224x224\n", " - adding /content/drive/MyDrive/your_folder/fountain_biplet/image_004_bottom.jpeg with resolution 1024x1024 --> 224x224\n", " - adding /content/drive/MyDrive/your_folder/fountain_biplet/image_004_top.jpeg with resolution 1024x1024 --> 224x224\n", " - adding /content/drive/MyDrive/your_folder/fountain_biplet/image_005_bottom.jpeg with resolution 1024x1024 --> 224x224\n", " - adding 
/content/drive/MyDrive/your_folder/fountain_biplet/image_005_top.jpeg with resolution 1024x1024 --> 224x224\n", " - adding /content/drive/MyDrive/your_folder/fountain_biplet/image_006_bottom.jpeg with resolution 1024x1024 --> 224x224\n", " - adding /content/drive/MyDrive/your_folder/fountain_biplet/image_006_top.jpeg with resolution 1024x1024 --> 224x224\n", " - adding /content/drive/MyDrive/your_folder/fountain_biplet/image_007_bottom.jpeg with resolution 1024x1024 --> 224x224\n", " - adding /content/drive/MyDrive/your_folder/fountain_biplet/image_007_top.jpeg with resolution 1024x1024 --> 224x224\n", " - adding /content/drive/MyDrive/your_folder/fountain_biplet/image_008_bottom.jpeg with resolution 1024x1024 --> 224x224\n", " - adding /content/drive/MyDrive/your_folder/fountain_biplet/image_008_top.jpeg with resolution 1024x1024 --> 224x224\n", " - adding /content/drive/MyDrive/your_folder/fountain_biplet/image_009_bottom.jpeg with resolution 1024x1024 --> 224x224\n", " - adding /content/drive/MyDrive/your_folder/fountain_biplet/image_009_top.jpeg with resolution 1024x1024 --> 224x224\n", " - adding /content/drive/MyDrive/your_folder/fountain_biplet/image_010_bottom.jpeg with resolution 1024x1024 --> 224x224\n", " - adding /content/drive/MyDrive/your_folder/fountain_biplet/image_010_top.jpeg with resolution 1024x1024 --> 224x224\n", " - adding /content/drive/MyDrive/your_folder/fountain_biplet/image_011_bottom.jpeg with resolution 1024x1024 --> 224x224\n", " - adding /content/drive/MyDrive/your_folder/fountain_biplet/image_011_top.jpeg with resolution 1024x1024 --> 224x224\n", " - adding /content/drive/MyDrive/your_folder/fountain_biplet/image_012_bottom.jpeg with resolution 1024x1024 --> 224x224\n", " - adding /content/drive/MyDrive/your_folder/fountain_biplet/image_012_top.jpeg with resolution 1024x1024 --> 224x224\n", " - adding /content/drive/MyDrive/your_folder/fountain_biplet/image_013_bottom.jpeg with resolution 1024x1024 --> 224x224\n", " - adding /content/drive/MyDrive/your_folder/fountain_biplet/image_013_top.jpeg with resolution 1024x1024 --> 224x224\n", " - adding /content/drive/MyDrive/your_folder/fountain_biplet/image_014_bottom.jpeg with resolution 1024x1024 --> 224x224\n", " - adding /content/drive/MyDrive/your_folder/fountain_biplet/image_014_top.jpeg with resolution 1024x1024 --> 224x224\n", " - adding /content/drive/MyDrive/your_folder/fountain_biplet/image_015_bottom.jpeg with resolution 1024x1024 --> 224x224\n", " - adding /content/drive/MyDrive/your_folder/fountain_biplet/image_015_top.jpeg with resolution 1024x1024 --> 224x224\n", " (Found 30 images)\n", "Loaded 30 images\n", "After loading images:\n", "GPU Memory - Allocated: 2.15GB, Reserved: 2.26GB\n", "CPU Memory Usage: 54.3%\n", "Creating 100 image pairs...\n" ] }, { "output_type": "stream", "name": "stderr", "text": [ "Preparing pairs: 100%|██████████| 100/100 [00:00<00:00, 739736.16it/s]\n" ] }, { "output_type": "stream", "name": "stdout", "text": [ "Running MASt3R inference on 100 pairs...\n", ">> Inference with model on 100 image pairs\n" ] }, { "output_type": "stream", "name": "stderr", "text": [ "\r 0%| | 0/100 [00:00<?, ?it/s]" ] }, { "output_type": "stream", "name": "stdout", "text": [ "Filtered points (conf > 0.5): 1505280 points\n", "COLMAP cameras.bin saved to /content/output/sparse/0/cameras.bin\n", "COLMAP images.bin saved to /content/output/sparse/0/images.bin\n", "COLMAP points3D.bin saved to /content/output/sparse/0/points3D.bin\n", "\n", "COLMAP binary files exported to /content/output/sparse/0/\n", " - cameras.bin: 30 cameras (PINHOLE model)\n", " - images.bin: 30 images\n", " - 
points3D.bin: 1505280 points\n", "\n", "======================================================================\n", "STEP 4.6: Saving Initial Point Cloud\n", "======================================================================\n", "\n", "Saving initial point cloud to /content/output/input.ply\n", "✓ Saved 1505280 points to /content/output/input.ply\n", "\n", "✓ Copied 30 images to /content/output/images\n", "======================================================================\n", "STEP 5: Running Gaussian Splatting (fixed version)\n", "======================================================================\n", "\n", "✓ Initial point cloud found: /content/output/input.ply\n", " Copying to: /content/output/sparse/0/points3D.ply\n", "✓ Copied successfully\n", "\n", "=== Running Gaussian Splatting ===\n", "Command: /usr/bin/python3 /content/gaussian-splatting/train.py -s /content/output -m /content/output/gaussian_splatting --iterations 1000 --eval\n", " Source: /content/output\n", " Output: /content/output/gaussian_splatting\n", "\n", "✓ Gaussian Splatting completed successfully\n", "✓ Final point cloud saved: /content/output/gaussian_splatting/point_cloud/iteration_1000/point_cloud.ply\n", "\n", "======================================================================\n", "PIPELINE COMPLETE\n", "======================================================================\n", "✓ Final point cloud: /content/output/gaussian_splatting/point_cloud/iteration_1000/point_cloud.ply\n", "\n", "Output directory structure:\n", " /content/output/\n", " ├── images/ (processed images)\n", " ├── input.ply (initial point cloud)\n", " ├── sparse/0/ (COLMAP data)\n", " │ ├── cameras.bin\n", " │ ├── images.bin\n", " │ ├── points3D.bin\n", " │ └── points3D.ply (for GS initialization)\n", " └── gaussian_splatting/ (GS output)\n" ] } ] },
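{ "cell_type": "code", "source": [ "# =====================================================================\n", "# Optional sanity check (illustrative, not part of the pipeline)\n", "# =====================================================================\n", "# A minimal sketch for verifying the run: compare the vertex counts\n", "# declared in the PLY headers of the initial point cloud (input.ply)\n", "# and the final Gaussian Splatting output. Assumptions: the helper\n", "# read_ply_vertex_count is hypothetical (stdlib only), and the paths\n", "# assume OUTPUT_DIR=/content/output and GS_ITERATIONS=1000 as above.\n", "import os\n", "\n", "def read_ply_vertex_count(path):\n", "    \"\"\"Return the vertex count declared in a PLY header (ASCII or binary).\"\"\"\n", "    with open(path, \"rb\") as f:\n", "        # The header is ASCII text up to 'end_header', even for binary PLY\n", "        if f.readline().strip() != b\"ply\":\n", "            raise ValueError(f\"{path} is not a PLY file\")\n", "        count = None\n", "        while True:\n", "            line = f.readline()\n", "            if not line:\n", "                raise ValueError(f\"{path}: header ended unexpectedly\")\n", "            if line.startswith(b\"element vertex\"):\n", "                count = int(line.split()[-1])\n", "            if line.strip() == b\"end_header\":\n", "                return count\n", "\n", "initial_ply = \"/content/output/input.ply\"\n", "final_ply = \"/content/output/gaussian_splatting/point_cloud/iteration_1000/point_cloud.ply\"\n", "for label, path in [(\"initial\", initial_ply), (\"final\", final_ply)]:\n", "    if os.path.exists(path):\n", "        print(f\"{label}: {read_ply_vertex_count(path):,} vertices ({path})\")\n", "    else:\n", "        print(f\"{label}: missing ({path})\")" ], "metadata": {}, "execution_count": null, "outputs": [] } ] }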