{ "metadata": { "kernelspec": { "name": "python3", "display_name": "Python 3" }, "language_info": { "name": "python" }, "colab": { "provenance": [], "gpuType": "T4" }, "accelerator": "GPU", "widgets": { "application/vnd.jupyter.widget-state+json": { "e77d9a4ce8544f39a5c9811cba265aa8": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HBoxModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HBoxView", "box_style": "", "children": [ "IPY_MODEL_a02ef012cc8e4c0fa262a81234906301", "IPY_MODEL_4a750e6c7bfa4209a1721acf99331a45", "IPY_MODEL_ed674b75d05b433c85dd201b7013803d" ], "layout": "IPY_MODEL_070af62ea35145c0b509dd4b5c7e8fd3" } }, "a02ef012cc8e4c0fa262a81234906301": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_ce4f381f840543d58933a043069ec9f8", "placeholder": "​", "style": "IPY_MODEL_2cb3a9029a9746318667c5a4c76a7dab", "value": "preprocessor_config.json: 100%" } }, "4a750e6c7bfa4209a1721acf99331a45": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "FloatProgressModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "ProgressView", "bar_style": "success", "description": "", 
"description_tooltip": null, "layout": "IPY_MODEL_d6fe40a036834d919ac7246189e2c81f", "max": 436, "min": 0, "orientation": "horizontal", "style": "IPY_MODEL_019e0283956e4f68ad22184f944cf3c6", "value": 436 } }, "ed674b75d05b433c85dd201b7013803d": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_4a768e102dcd463aab632c5ce2b43f1a", "placeholder": "​", "style": "IPY_MODEL_23774a4251de4cb09e1d56c7c5795d05", "value": " 436/436 [00:00<00:00, 45.0kB/s]" } }, "070af62ea35145c0b509dd4b5c7e8fd3": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, 
"ce4f381f840543d58933a043069ec9f8": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "2cb3a9029a9746318667c5a4c76a7dab": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "d6fe40a036834d919ac7246189e2c81f": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": 
null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "019e0283956e4f68ad22184f944cf3c6": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "ProgressStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "bar_color": null, "description_width": "" } }, "4a768e102dcd463aab632c5ce2b43f1a": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": 
null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "23774a4251de4cb09e1d56c7c5795d05": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "fdfd59523e754effa4878c5680a9616d": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HBoxModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HBoxView", "box_style": "", "children": [ "IPY_MODEL_4b240715214f4c29822073350ac0369d", "IPY_MODEL_14d21c9d1ff840deb4e1223ff847c99c", "IPY_MODEL_f455f2bb8e804a15bcb45aa4f5148d7c" ], "layout": "IPY_MODEL_c36753994d014f809a112247ca2a88d9" } }, "4b240715214f4c29822073350ac0369d": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_5f2c9704ac4d415aa329fb31f34c916f", "placeholder": "​", "style": "IPY_MODEL_537a257c672647af991dbadbfee89bd1", "value": "config.json: 
100%" } }, "14d21c9d1ff840deb4e1223ff847c99c": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "FloatProgressModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "ProgressView", "bar_style": "success", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_1c8c3747d86f4d7b900b546bd3b32065", "max": 548, "min": 0, "orientation": "horizontal", "style": "IPY_MODEL_7e00de6878a44473a7bc3ae8c386da6e", "value": 548 } }, "f455f2bb8e804a15bcb45aa4f5148d7c": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_ac48058fb6bb42afad0c59fe59c2e04a", "placeholder": "​", "style": "IPY_MODEL_1bf0ebb06347462ebe5966c67f5acfd9", "value": " 548/548 [00:00<00:00, 61.1kB/s]" } }, "c36753994d014f809a112247ca2a88d9": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": 
null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "5f2c9704ac4d415aa329fb31f34c916f": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "537a257c672647af991dbadbfee89bd1": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", 
"_view_name": "StyleView", "description_width": "" } }, "1c8c3747d86f4d7b900b546bd3b32065": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "7e00de6878a44473a7bc3ae8c386da6e": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "ProgressStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "bar_color": null, "description_width": "" } }, "ac48058fb6bb42afad0c59fe59c2e04a": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", 
"_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "1bf0ebb06347462ebe5966c67f5acfd9": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "5f072e098c0c45028553b98bf3e86935": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HBoxModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HBoxView", "box_style": "", "children": [ "IPY_MODEL_033b22f052d648d1a23c13a4ebe42c96", "IPY_MODEL_74f6ceb0670d448795e19382d598bff0", "IPY_MODEL_c1940dbf42f149bf8432f813de5a9514" ], "layout": "IPY_MODEL_f3e807bad5224e11a63657ad2a409fb7" } }, "033b22f052d648d1a23c13a4ebe42c96": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": 
"1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_2d77cbe89b3e43ccac8b060a19871587", "placeholder": "​", "style": "IPY_MODEL_a93447dcb49d476fa7754ba734ea3170", "value": "model.safetensors: 100%" } }, "74f6ceb0670d448795e19382d598bff0": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "FloatProgressModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "ProgressView", "bar_style": "success", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_52c56de17c5a4e18a068007ea15d4fe4", "max": 346345912, "min": 0, "orientation": "horizontal", "style": "IPY_MODEL_25ec7a1ca66a40b283456109f4d43a51", "value": 346345912 } }, "c1940dbf42f149bf8432f813de5a9514": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_1be18665ff41490894995b775e2d68c0", "placeholder": "​", "style": "IPY_MODEL_46ed28c510e44663b8dd221bd64a749b", "value": " 346M/346M [00:04<00:00, 178MB/s]" } }, "f3e807bad5224e11a63657ad2a409fb7": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": 
"@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "2d77cbe89b3e43ccac8b060a19871587": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, 
"overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "a93447dcb49d476fa7754ba734ea3170": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "52c56de17c5a4e18a068007ea15d4fe4": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "25ec7a1ca66a40b283456109f4d43a51": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "ProgressStyleModel", "_view_count": 
null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "bar_color": null, "description_width": "" } }, "1be18665ff41490894995b775e2d68c0": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "46ed28c510e44663b8dd221bd64a749b": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } } } } }, "nbformat_minor": 0, "nbformat": 4, "cells": [ { "cell_type": "markdown", "source": [ "# **dino-lightglue-colmap-gs-11**\n", "2026/01/10" ], "metadata": { "id": "qDQLX3PArmh8" } }, { "cell_type": "markdown", "source": [ "# **setup**" ], "metadata": { "id": 
"vXt8y7QyyRn9" } }, { "cell_type": "code", "source": [ "from google.colab import drive\n", "drive.mount('/content/drive')" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "t9kAhlZHTIqC", "outputId": "e23a9c7b-f61e-4054-a368-1e389d5f9753" }, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "Mounted at /content/drive\n" ] } ], "execution_count": 1 }, { "cell_type": "code", "source": [ "import os\n", "import sys\n", "import subprocess\n", "from PIL import Image\n", "\n", "def run_cmd(cmd, check=True, capture=False):\n", " \"\"\"Run command with better error handling\"\"\"\n", " print(f\"Running: {' '.join(cmd)}\")\n", " result = subprocess.run(\n", " cmd,\n", " capture_output=capture,\n", " text=True,\n", " check=False\n", " )\n", " if check and result.returncode != 0:\n", " print(f\"❌ Command failed with code {result.returncode}\")\n", " if capture:\n", " print(f\"STDOUT: {result.stdout}\")\n", " print(f\"STDERR: {result.stderr}\")\n", " return result\n", "\n", "def setup_environment():\n", " \"\"\"\n", " Colab environment setup for Gaussian Splatting + LightGlue + pycolmap\n", " Python 3.12 compatible version (v8)\n", " \"\"\"\n", "\n", " print(\"🚀 Setting up COLAB environment (v8 - Python 3.12 compatible)\")\n", "\n", " WORK_DIR = \"/content/gaussian-splatting\"\n", "\n", " # =====================================================================\n", " # STEP 0: NumPy FIX (Python 3.12 compatible)\n", " # =====================================================================\n", " print(\"\\n\" + \"=\"*70)\n", " print(\"STEP 0: Fix NumPy (Python 3.12 compatible)\")\n", " print(\"=\"*70)\n", "\n", " # Python 3.12 requires numpy >= 1.26\n", " run_cmd([sys.executable, \"-m\", \"pip\", \"uninstall\", \"-y\", \"numpy\"])\n", " run_cmd([sys.executable, \"-m\", \"pip\", \"install\", \"numpy==1.26.4\"])\n", "\n", " # sanity check\n", " run_cmd([sys.executable, \"-c\", \"import numpy; print('NumPy:', numpy.__version__)\"])\n", "\n", 
" # =====================================================================\n", " # STEP 1: System packages (Colab)\n", " # =====================================================================\n", " print(\"\\n\" + \"=\"*70)\n", " print(\"STEP 1: System packages\")\n", " print(\"=\"*70)\n", "\n", " run_cmd([\"apt-get\", \"update\", \"-qq\"])\n", " run_cmd([\n", " \"apt-get\", \"install\", \"-y\", \"-qq\",\n", " \"colmap\",\n", " \"build-essential\",\n", " \"cmake\",\n", " \"git\",\n", " \"libopenblas-dev\",\n", " \"xvfb\"\n", " ])\n", "\n", " # virtual display (COLMAP / OpenCV safety)\n", " os.environ[\"QT_QPA_PLATFORM\"] = \"offscreen\"\n", " os.environ[\"DISPLAY\"] = \":99\"\n", " subprocess.Popen(\n", " [\"Xvfb\", \":99\", \"-screen\", \"0\", \"1024x768x24\"],\n", " stdout=subprocess.DEVNULL,\n", " stderr=subprocess.DEVNULL\n", " )\n", "\n", " # =====================================================================\n", " # STEP 2: Clone Gaussian Splatting\n", " # =====================================================================\n", " print(\"\\n\" + \"=\"*70)\n", " print(\"STEP 2: Clone Gaussian Splatting\")\n", " print(\"=\"*70)\n", "\n", " if not os.path.exists(WORK_DIR):\n", " run_cmd([\n", " \"git\", \"clone\", \"--recursive\",\n", " \"https://github.com/graphdeco-inria/gaussian-splatting.git\",\n", " WORK_DIR\n", " ])\n", " else:\n", " print(\"✓ Repository already exists\")\n", "\n", " # =====================================================================\n", " # STEP 3: Python packages (FIXED ORDER & VERSIONS)\n", " # =====================================================================\n", " print(\"\\n\" + \"=\"*70)\n", " print(\"STEP 3: Python packages (VERBOSE MODE)\")\n", " print(\"=\"*70)\n", "\n", " # ---- PyTorch (Colab CUDA対応) ----\n", " print(\"\\n📦 Installing PyTorch...\")\n", " run_cmd([\n", " sys.executable, \"-m\", \"pip\", \"install\",\n", " \"torch\", \"torchvision\", \"torchaudio\"\n", " ])\n", "\n", " # ---- Core utils ----\n", " 
print(\"\\n📦 Installing core utilities...\")\n", " run_cmd([\n", " sys.executable, \"-m\", \"pip\", \"install\",\n", " \"opencv-python\",\n", " \"pillow\",\n", " \"imageio\",\n", " \"imageio-ffmpeg\",\n", " \"plyfile\",\n", " \"tqdm\",\n", " \"tensorboard\"\n", " ])\n", "\n", " # ---- transformers (NumPy 1.26 compatible) ----\n", " print(\"\\n📦 Installing transformers (NumPy 1.26 compatible)...\")\n", " # Install transformers with proper dependencies\n", " run_cmd([\n", " sys.executable, \"-m\", \"pip\", \"install\",\n", " \"transformers==4.40.0\"\n", " ])\n", "\n", " # ---- LightGlue stack (GITHUB INSTALL) ----\n", " print(\"\\n📦 Installing LightGlue stack...\")\n", "\n", " # Install kornia first\n", " run_cmd([sys.executable, \"-m\", \"pip\", \"install\", \"kornia\"])\n", "\n", " # Install h5py (sometimes needed)\n", " run_cmd([sys.executable, \"-m\", \"pip\", \"install\", \"h5py\"])\n", "\n", " # Install matplotlib (LightGlue dependency)\n", " run_cmd([sys.executable, \"-m\", \"pip\", \"install\", \"matplotlib\"])\n", "\n", " # Install LightGlue directly from GitHub (more reliable)\n", " print(\" Installing LightGlue from GitHub...\")\n", " run_cmd([sys.executable, \"-m\", \"pip\", \"install\",\n", " \"git+https://github.com/cvg/LightGlue.git\"])\n", "\n", " # Install pycolmap\n", " run_cmd([sys.executable, \"-m\", \"pip\", \"install\", \"pycolmap\"])\n", "\n", " # =====================================================================\n", " # STEP 4: Build GS submodules\n", " # =====================================================================\n", " print(\"\\n\" + \"=\"*70)\n", " print(\"STEP 4: Build Gaussian Splatting submodules\")\n", " print(\"=\"*70)\n", "\n", " submodules = {\n", " \"diff-gaussian-rasterization\":\n", " \"https://github.com/graphdeco-inria/diff-gaussian-rasterization.git\",\n", " \"simple-knn\":\n", " \"https://github.com/camenduru/simple-knn.git\"\n", " }\n", "\n", " for name, repo in submodules.items():\n", " print(f\"\\n📦 Installing 
{name}...\")\n", " path = os.path.join(WORK_DIR, \"submodules\", name)\n", " if not os.path.exists(path):\n", " run_cmd([\"git\", \"clone\", repo, path])\n", " run_cmd([sys.executable, \"-m\", \"pip\", \"install\", path])\n", "\n", " # =====================================================================\n", " # STEP 5: Detailed Verification\n", " # =====================================================================\n", " print(\"\\n\" + \"=\"*70)\n", " print(\"STEP 5: Detailed Verification\")\n", " print(\"=\"*70)\n", "\n", " # NumPy (verify version first)\n", " print(\"\\n🔍 Testing NumPy...\")\n", " try:\n", " import numpy as np\n", " print(f\" ✓ NumPy: {np.__version__}\")\n", " except Exception as e:\n", " print(f\" ❌ NumPy failed: {e}\")\n", "\n", " # PyTorch\n", " print(\"\\n🔍 Testing PyTorch...\")\n", " try:\n", " import torch\n", " print(f\" ✓ PyTorch: {torch.__version__}\")\n", " print(f\" ✓ CUDA available: {torch.cuda.is_available()}\")\n", " if torch.cuda.is_available():\n", " print(f\" ✓ CUDA version: {torch.version.cuda}\")\n", " except Exception as e:\n", " print(f\" ❌ PyTorch failed: {e}\")\n", "\n", " # transformers\n", " print(\"\\n🔍 Testing transformers...\")\n", " try:\n", " import transformers\n", " print(f\" ✓ transformers version: {transformers.__version__}\")\n", " from transformers import AutoModel\n", " print(f\" ✓ AutoModel import: OK\")\n", " except Exception as e:\n", " print(f\" ❌ transformers failed: {e}\")\n", " print(f\" Attempting detailed diagnosis...\")\n", " result = run_cmd([\n", " sys.executable, \"-c\",\n", " \"import transformers; print(transformers.__version__)\"\n", " ], capture=True)\n", " print(f\" Output: {result.stdout}\")\n", " print(f\" Error: {result.stderr}\")\n", "\n", " # LightGlue\n", " print(\"\\n🔍 Testing LightGlue...\")\n", " try:\n", " from lightglue import LightGlue, ALIKED\n", " print(f\" ✓ LightGlue: OK\")\n", " print(f\" ✓ ALIKED: OK\")\n", " except Exception as e:\n", " print(f\" ❌ LightGlue failed: 
{e}\")\n", " print(f\" Attempting detailed diagnosis...\")\n", " result = run_cmd([\n", " sys.executable, \"-c\",\n", " \"from lightglue import LightGlue\"\n", " ], capture=True)\n", " print(f\" Output: {result.stdout}\")\n", " print(f\" Error: {result.stderr}\")\n", "\n", " # pycolmap\n", " print(\"\\n🔍 Testing pycolmap...\")\n", " try:\n", " import pycolmap\n", " print(f\" ✓ pycolmap: OK\")\n", " except Exception as e:\n", " print(f\" ❌ pycolmap failed: {e}\")\n", "\n", " # kornia\n", " print(\"\\n🔍 Testing kornia...\")\n", " try:\n", " import kornia\n", " print(f\" ✓ kornia: {kornia.__version__}\")\n", " except Exception as e:\n", " print(f\" ❌ kornia failed: {e}\")\n", "\n", " print(\"\\n\" + \"=\"*70)\n", " print(\"✅ SETUP COMPLETE\")\n", " print(\"=\"*70)\n", " print(f\"Working dir: {WORK_DIR}\")\n", "\n", " return WORK_DIR\n", "\n", "\n", "if __name__ == \"__main__\":\n", " setup_environment()" ], "metadata": { "id": "z6cBHbABzZ0F", "colab": { "base_uri": "https://localhost:8080/" }, "outputId": "1266b2b0-5ce7-42fa-da48-3f223093dcd0" }, "execution_count": 2, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "🚀 Setting up COLAB environment (v8 - Python 3.12 compatible)\n", "\n", "======================================================================\n", "STEP 0: Fix NumPy (Python 3.12 compatible)\n", "======================================================================\n", "Running: /usr/bin/python3 -m pip uninstall -y numpy\n", "Running: /usr/bin/python3 -m pip install numpy==1.26.4\n", "Running: /usr/bin/python3 -c import numpy; print('NumPy:', numpy.__version__)\n", "\n", "======================================================================\n", "STEP 1: System packages\n", "======================================================================\n", "Running: apt-get update -qq\n", "Running: apt-get install -y -qq colmap build-essential cmake git libopenblas-dev xvfb\n", "\n", 
"======================================================================\n", "STEP 2: Clone Gaussian Splatting\n", "======================================================================\n", "Running: git clone --recursive https://github.com/graphdeco-inria/gaussian-splatting.git /content/gaussian-splatting\n", "\n", "======================================================================\n", "STEP 3: Python packages (VERBOSE MODE)\n", "======================================================================\n", "\n", "📦 Installing PyTorch...\n", "Running: /usr/bin/python3 -m pip install torch torchvision torchaudio\n", "\n", "📦 Installing core utilities...\n", "Running: /usr/bin/python3 -m pip install opencv-python pillow imageio imageio-ffmpeg plyfile tqdm tensorboard\n", "\n", "📦 Installing transformers (NumPy 1.26 compatible)...\n", "Running: /usr/bin/python3 -m pip install transformers==4.40.0\n", "\n", "📦 Installing LightGlue stack...\n", "Running: /usr/bin/python3 -m pip install kornia\n", "Running: /usr/bin/python3 -m pip install h5py\n", "Running: /usr/bin/python3 -m pip install matplotlib\n", " Installing LightGlue from GitHub...\n", "Running: /usr/bin/python3 -m pip install git+https://github.com/cvg/LightGlue.git\n", "Running: /usr/bin/python3 -m pip install pycolmap\n", "\n", "======================================================================\n", "STEP 4: Build Gaussian Splatting submodules\n", "======================================================================\n", "\n", "📦 Installing diff-gaussian-rasterization...\n", "Running: /usr/bin/python3 -m pip install /content/gaussian-splatting/submodules/diff-gaussian-rasterization\n", "\n", "📦 Installing simple-knn...\n", "Running: /usr/bin/python3 -m pip install /content/gaussian-splatting/submodules/simple-knn\n", "\n", "======================================================================\n", "STEP 5: Detailed Verification\n", 
"======================================================================\n", "\n", "🔍 Testing NumPy...\n", " ✓ NumPy: 2.0.2\n", "\n", "🔍 Testing PyTorch...\n", " ✓ PyTorch: 2.9.0+cu126\n", " ✓ CUDA available: True\n", " ✓ CUDA version: 12.6\n", "\n", "🔍 Testing transformers...\n", " ✓ transformers version: 4.40.0\n", " ✓ AutoModel import: OK\n", "\n", "🔍 Testing LightGlue...\n", " ✓ LightGlue: OK\n", " ✓ ALIKED: OK\n", "\n", "🔍 Testing pycolmap...\n", " ✓ pycolmap: OK\n", "\n", "🔍 Testing kornia...\n", " ✓ kornia: 0.8.2\n", "\n", "======================================================================\n", "✅ SETUP COMPLETE\n", "======================================================================\n", "Working dir: /content/gaussian-splatting\n" ] } ] }, { "cell_type": "code", "source": [ "import os\n", "\n", "%cd /content/gaussian-splatting\n", "\n", "files = ['database.py', 'h5_to_db.py', 'metric.py']\n", "base_url = 'https://huggingface.co/stpete2/imc25_utils/resolve/main/'\n", "\n", "for file in files:\n", " if not os.path.exists(file):\n", " !wget -q {base_url + file}\n", " print(f\"✓ {file} download complete\")\n", " else:\n", " print(f\"✓ {file} already exists\")\n" ], "metadata": { "id": "eJrkKiCLzt1G", "colab": { "base_uri": "https://localhost:8080/" }, "outputId": "4e8be0f2-4001-42b8-8e05-f57eff91ab43" }, "execution_count": 3, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "/content/gaussian-splatting\n", "✓ database.py download complete\n", "✓ h5_to_db.py download complete\n", "✓ metric.py download complete\n" ] } ] }, { "cell_type": "markdown", "source": [ "# **install libraries**" ], "metadata": { "id": "DwyCRLt4yYfx" } }, { "cell_type": "code", "source": [ "from database import COLMAPDatabase, image_ids_to_pair_id\n", "from h5_to_db import add_keypoints, add_matches\n", "from metric import *" ], "metadata": { "id": "WVr8ggyVuq6q" }, "execution_count": 4, "outputs": [] }, { "cell_type": "code", "source": [ "\"\"\"\n", "Gaussian 
Splatting Pipeline\n", "Simple and robust pipeline: LightGlue → COLMAP → Gaussian Splatting\n", "\"\"\"\n", "\n", "import os\n", "import sys\n", "import gc\n", "import h5py\n", "import numpy as np\n", "import torch\n", "import torch.nn.functional as F\n", "from tqdm import tqdm\n", "from pathlib import Path\n", "import subprocess\n", "\n", "# LightGlue\n", "from lightglue import ALIKED, LightGlue\n", "from lightglue.utils import load_image\n", "\n", "# Transformers for DINO\n", "from transformers import AutoImageProcessor, AutoModel\n", "\n", "\n", "# ============================================================================\n", "# Configuration\n", "# ============================================================================\n", "class Config:\n", " # Feature extraction\n", " N_KEYPOINTS = 8192\n", " IMAGE_SIZE = 1024\n", "\n", " # Pair selection\n", " GLOBAL_TOPK = 200\n", " MIN_MATCHES = 10\n", " RATIO_THR = 1.2\n", "\n", " # Paths\n", " DINO_MODEL = \"facebook/dinov2-base\" # Change if using local path\n", "\n", " # Device\n", " DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')" ], "metadata": { "id": "7NfrJdMvrPZn" }, "outputs": [], "execution_count": 5 }, { "cell_type": "code", "source": [ "# ============================================================================\n", "# Step 0: images_square\n", "# ============================================================================\n", "\n", "def preprocess_images_square(input_dir, output_dir, size=1024, background='black'):\n", " \"\"\"\n", " Standardize all images to a square format (maintaining aspect ratio with padding).\n", "\n", " Args:\n", " input_dir (str): Directory containing input images.\n", " output_dir (str): Directory to save processed images.\n", " size (int): Target square dimension (default: 1024).\n", " background (str): Background style: 'black', 'white', or 'blur'.\n", " \"\"\"\n", " from PIL import Image, ImageFilter\n", " import os\n", " from tqdm import tqdm\n", 
"\n", " print(f\"\\n=== Preprocessing to {size}x{size} Square Images ===\")\n", "\n", " os.makedirs(output_dir, exist_ok=True)\n", "\n", " image_files = sorted([\n", " f for f in os.listdir(input_dir)\n", " if f.lower().endswith(('.jpg', '.jpeg', '.png'))\n", " ])\n", "\n", " stats = {\n", " 'total': len(image_files),\n", " 'landscape': 0,\n", " 'portrait': 0,\n", " 'square': 0,\n", " 'resized': 0,\n", " }\n", "\n", " for img_file in tqdm(image_files, desc=\"Converting to square\"):\n", " img_path = os.path.join(input_dir, img_file)\n", " img = Image.open(img_path).convert('RGB')\n", "\n", " width, height = img.size\n", "\n", " # Statistics\n", " if width > height:\n", " stats['landscape'] += 1\n", " elif width < height:\n", " stats['portrait'] += 1\n", " else:\n", " stats['square'] += 1\n", "\n", " # Resize based on the longest side\n", " max_dim = max(width, height)\n", " if max_dim != size:\n", " scale = size / max_dim\n", " new_width = int(width * scale)\n", " new_height = int(height * scale)\n", " img = img.resize((new_width, new_height), Image.LANCZOS)\n", " stats['resized'] += 1\n", " else:\n", " new_width, new_height = width, height\n", "\n", " # Create background\n", " if background == 'black':\n", " canvas = Image.new('RGB', (size, size), (0, 0, 0))\n", " elif background == 'white':\n", " canvas = Image.new('RGB', (size, size), (255, 255, 255))\n", " elif background == 'blur':\n", " # Use a blurred version of the image as background for a professional look\n", " canvas = img.resize((size, size), Image.LANCZOS)\n", " canvas = canvas.filter(ImageFilter.GaussianBlur(radius=20))\n", " else:\n", " canvas = Image.new('RGB', (size, size), (0, 0, 0))\n", "\n", " # Center the image\n", " offset_x = (size - new_width) // 2\n", " offset_y = (size - new_height) // 2\n", " canvas.paste(img, (offset_x, offset_y))\n", "\n", " # Save output\n", " output_path = os.path.join(output_dir, img_file)\n", " canvas.save(output_path, quality=95, optimize=True)\n", "\n", " 
print(f\"\\n✓ Preprocessing complete:\")\n", " print(f\" Total images: {stats['total']}\")\n", " print(f\" Landscape: {stats['landscape']} / Portrait: {stats['portrait']} / Square: {stats['square']}\")\n", " print(f\" Resized: {stats['resized']}\")\n", " print(f\" Output size: {size}x{size}\")\n", "\n", " return output_dir" ], "metadata": { "id": "TkVzKRqsvxFZ" }, "execution_count": 6, "outputs": [] }, { "cell_type": "code", "source": [ "from PIL import Image\n", "\n", "def normalize_image_sizes_biplet(input_dir, output_dir=None, size=1024):\n", " \"\"\"\n", " Generates two square crops (Left & Right or Top & Bottom)\n", " from each image in a directory.\n", " \"\"\"\n", " if output_dir is None:\n", " output_dir = input_dir\n", "\n", " os.makedirs(output_dir, exist_ok=True)\n", "\n", " print(f\"Generating 2 cropped squares (Left/Right or Top/Bottom) for each image...\")\n", " print()\n", "\n", " converted_count = 0\n", " size_stats = {}\n", "\n", " for img_file in sorted(os.listdir(input_dir)):\n", " if not img_file.lower().endswith(('.jpg', '.jpeg', '.png')):\n", " continue\n", "\n", " input_path = os.path.join(input_dir, img_file)\n", "\n", " try:\n", " img = Image.open(input_path)\n", " original_size = img.size\n", "\n", " size_key = f\"{original_size[0]}x{original_size[1]}\"\n", " size_stats[size_key] = size_stats.get(size_key, 0) + 1\n", "\n", " # Generate 2 crops\n", " crops = generate_two_crops(img, size)\n", "\n", " base_name, ext = os.path.splitext(img_file)\n", " for mode, cropped_img in crops.items():\n", " output_path = os.path.join(output_dir, f\"{base_name}_{mode}{ext}\")\n", " cropped_img.save(output_path, quality=95)\n", "\n", " converted_count += 1\n", " print(f\" ✓ {img_file}: {original_size} → 2 square images generated\")\n", "\n", " except Exception as e:\n", " print(f\" ✗ Error processing {img_file}: {e}\")\n", "\n", " print(f\"\\nProcessing complete: {converted_count} source images processed\")\n", " print(f\"Original size distribution: {size_stats}\")\n", " return 
converted_count\n", "\n", "\n", "def generate_two_crops(img, size):\n", " \"\"\"\n", " Crops the image into a square and returns 2 variations\n", " (Left/Right for landscape, Top/Bottom for portrait).\n", " \"\"\"\n", " width, height = img.size\n", " crop_size = min(width, height)\n", " crops = {}\n", "\n", " if width > height:\n", " # Landscape → Left & Right\n", " positions = {\n", " 'left': 0,\n", " 'right': width - crop_size\n", " }\n", " for mode, x_offset in positions.items():\n", " box = (x_offset, 0, x_offset + crop_size, crop_size)\n", " crops[mode] = img.crop(box).resize(\n", " (size, size),\n", " Image.Resampling.LANCZOS\n", " )\n", "\n", " else:\n", " # Portrait or Square → Top & Bottom\n", " positions = {\n", " 'top': 0,\n", " 'bottom': height - crop_size\n", " }\n", " for mode, y_offset in positions.items():\n", " box = (0, y_offset, crop_size, y_offset + crop_size)\n", " crops[mode] = img.crop(box).resize(\n", " (size, size),\n", " Image.Resampling.LANCZOS\n", " )\n", "\n", " return crops" ], "metadata": { "id": "A6smO9X0el3d" }, "execution_count": 13, "outputs": [] }, { "cell_type": "code", "source": [ "# ============================================================================\n", "# Step 1: Image Pair Selection (DINO + ALIKED local verify)\n", "# ============================================================================\n", "\n", "def load_torch_image(fname, device):\n", " \"\"\"Load image as torch tensor\"\"\"\n", " from PIL import Image\n", " import torchvision.transforms as T\n", "\n", " img = Image.open(fname).convert('RGB')\n", " transform = T.Compose([\n", " T.ToTensor(),\n", " ])\n", " return transform(img).unsqueeze(0).to(device)\n", "\n", "def extract_dino_global(image_paths, model_path, device):\n", " \"\"\"Extract DINO global descriptors\"\"\"\n", " print(\"\\n=== Extracting DINO Global Features ===\")\n", "\n", " processor = AutoImageProcessor.from_pretrained(model_path)\n", " model = 
AutoModel.from_pretrained(model_path).eval().to(device)\n", "\n", " global_descs = []\n", " for img_path in tqdm(image_paths):\n", " img = load_torch_image(img_path, device)\n", " with torch.no_grad():\n", " inputs = processor(images=img, return_tensors=\"pt\", do_rescale=False).to(device)\n", " outputs = model(**inputs)\n", " desc = F.normalize(outputs.last_hidden_state[:, 1:].max(dim=1)[0], dim=1, p=2)\n", " global_descs.append(desc.cpu())\n", "\n", " global_descs = torch.cat(global_descs, dim=0)\n", "\n", " del model\n", " torch.cuda.empty_cache()\n", " gc.collect()\n", "\n", " return global_descs\n", "\n", "def build_topk_pairs(global_feats, k, device):\n", " \"\"\"Build top-k similar pairs from global features\"\"\"\n", " g = global_feats.to(device)\n", " sim = g @ g.T\n", " sim.fill_diagonal_(-1)\n", "\n", " N = sim.size(0)\n", " k = min(k, N - 1)\n", "\n", " topk_indices = torch.topk(sim, k, dim=1).indices.cpu()\n", "\n", " pairs = []\n", " for i in range(N):\n", " for j in topk_indices[i]:\n", " j = j.item()\n", " if i < j:\n", " pairs.append((i, j))\n", "\n", " return list(set(pairs))\n", "\n", "def extract_aliked_features(image_paths, device):\n", " \"\"\"Extract ALIKED local features\"\"\"\n", " print(\"\\n=== Extracting ALIKED Local Features ===\")\n", "\n", " extractor = ALIKED(\n", " model_name=\"aliked-n16\",\n", " max_num_keypoints=Config.N_KEYPOINTS,\n", " detection_threshold=0.01,\n", " resize=Config.IMAGE_SIZE\n", " ).eval().to(device)\n", "\n", " features = []\n", " for img_path in tqdm(image_paths):\n", " img = load_torch_image(img_path, device)\n", " with torch.no_grad():\n", " feats = extractor.extract(img)\n", " kpts = feats['keypoints'].reshape(-1, 2).cpu()\n", " descs = feats['descriptors'].reshape(len(kpts), -1).cpu()\n", " features.append({'keypoints': kpts, 'descriptors': descs})\n", "\n", " del extractor\n", " torch.cuda.empty_cache()\n", " gc.collect()\n", "\n", " return features\n", "\n", "def verify_pairs_locally(pairs, features, 
device, threshold=Config.MIN_MATCHES):\n", " \"\"\"Verify pairs using local descriptor matching\"\"\"\n", " print(\"\\n=== Verifying Pairs with Local Features ===\")\n", "\n", " verified = []\n", " for i, j in tqdm(pairs):\n", " desc1 = features[i]['descriptors'].to(device)\n", " desc2 = features[j]['descriptors'].to(device)\n", "\n", " if len(desc1) == 0 or len(desc2) == 0:\n", " continue\n", "\n", " # Simple mutual nearest neighbor\n", " dist = torch.cdist(desc1, desc2, p=2)\n", " min_dist = dist.min(dim=1)[0]\n", " n_matches = (min_dist < Config.RATIO_THR).sum().item()\n", "\n", " if n_matches >= threshold:\n", " verified.append((i, j))\n", "\n", " return verified\n", "\n", "def get_image_pairs(image_paths):\n", " \"\"\"Main pair selection pipeline\"\"\"\n", " device = Config.DEVICE\n", "\n", " # 1. DINO global\n", " global_feats = extract_dino_global(image_paths, Config.DINO_MODEL, device)\n", " pairs = build_topk_pairs(global_feats, Config.GLOBAL_TOPK, device)\n", "\n", " print(f\"Initial pairs from global features: {len(pairs)}\")\n", "\n", " # 2. ALIKED local\n", " features = extract_aliked_features(image_paths, device)\n", "\n", " # 3. 
Local verification\n", " verified_pairs = verify_pairs_locally(pairs, features, device)\n", "\n", " print(f\"Verified pairs: {len(verified_pairs)}\")\n", "\n", " return verified_pairs, features" ], "metadata": { "id": "FNjFURfYmVcL" }, "outputs": [], "execution_count": 7 }, { "cell_type": "code", "source": [ "# ============================================================================\n", "# Step 2: Feature Matching (ALIKED + LightGlue)\n", "# ============================================================================\n", "\n", "def match_pairs_lightglue(image_paths, pairs, features, output_dir):\n", " \"\"\"\n", " Match image pairs using LightGlue\n", " \"\"\"\n", " print(\"\\n=== Matching with LightGlue ===\")\n", "\n", " os.makedirs(output_dir, exist_ok=True)\n", " keypoints_path = os.path.join(output_dir, 'keypoints.h5')\n", " matches_path = os.path.join(output_dir, 'matches.h5')\n", "\n", " if os.path.exists(keypoints_path):\n", " os.remove(keypoints_path)\n", " if os.path.exists(matches_path):\n", " os.remove(matches_path)\n", "\n", " device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n", " extractor = ALIKED(max_num_keypoints=4096, detection_threshold=0.2, nms_radius=2).eval().to(device)\n", " matcher = LightGlue(features='aliked').eval().to(device)\n", "\n", " if isinstance(features, dict):\n", " all_keypoints = features['keypoints']\n", " all_descriptors = features['descriptors']\n", " elif isinstance(features, list):\n", " all_keypoints = [f['keypoints'] for f in features]\n", " all_descriptors = [f['descriptors'] for f in features]\n", " else:\n", " raise ValueError(f\"Unsupported features type: {type(features)}\")\n", "\n", " with h5py.File(keypoints_path, 'w') as f_kp:\n", " for idx, img_path in enumerate(tqdm(image_paths, desc=\"Saving keypoints\")):\n", " img_name = os.path.splitext(os.path.basename(img_path))[0]\n", "\n", " kp = all_keypoints[idx]\n", " if torch.is_tensor(kp):\n", " kp = kp.cpu().numpy()\n", " 
f_kp.create_dataset(img_name, data=kp)\n", "\n", " # Match pairs\n", " with h5py.File(matches_path, 'w') as f_match:\n", " for idx1, idx2 in tqdm(pairs, desc=\"Matching\"):\n", " with torch.no_grad():\n", " kp0 = all_keypoints[idx1]\n", " kp1 = all_keypoints[idx2]\n", " desc0 = all_descriptors[idx1]\n", " desc1 = all_descriptors[idx2]\n", "\n", " if isinstance(kp0, np.ndarray):\n", " kp0 = torch.from_numpy(kp0).float().to(device)\n", " kp1 = torch.from_numpy(kp1).float().to(device)\n", " desc0 = torch.from_numpy(desc0).float().to(device)\n", " desc1 = torch.from_numpy(desc1).float().to(device)\n", " else:\n", " kp0 = kp0.float().to(device)\n", " kp1 = kp1.float().to(device)\n", " desc0 = desc0.float().to(device)\n", " desc1 = desc1.float().to(device)\n", "\n", " feats0 = {\n", " 'keypoints': kp0.unsqueeze(0) if kp0.dim() == 2 else kp0,\n", " 'descriptors': desc0.unsqueeze(0) if desc0.dim() == 2 else desc0,\n", " }\n", " feats1 = {\n", " 'keypoints': kp1.unsqueeze(0) if kp1.dim() == 2 else kp1,\n", " 'descriptors': desc1.unsqueeze(0) if desc1.dim() == 2 else desc1,\n", " }\n", "\n", " matches01 = matcher({'image0': feats0, 'image1': feats1})\n", "\n", " if 'matches0' in matches01:\n", " matches0 = matches01['matches0'].cpu().numpy()\n", " if matches0.ndim > 1:\n", " matches0 = matches0[0]\n", " valid = matches0 > -1\n", " matches = np.stack([np.where(valid)[0], matches0[valid]], axis=1)\n", " elif 'matches' in matches01:\n", " m = matches01['matches']\n", " if isinstance(m, list):\n", " matches = np.array(m)\n", " elif hasattr(m, 'cpu'):\n", " matches = m.cpu().numpy()\n", " else:\n", " matches = np.array(m)\n", " else:\n", " continue\n", "\n", " if len(matches) > 0:\n", " img_name1 = os.path.splitext(os.path.basename(image_paths[idx1]))[0]\n", " img_name2 = os.path.splitext(os.path.basename(image_paths[idx2]))[0]\n", " pair_key = f\"{img_name1}_{img_name2}\"\n", " f_match.create_dataset(pair_key, data=matches)\n", "\n", " print(f\"✓ Matches saved to 
{matches_path}\")\n" ], "metadata": { "id": "X-PKgmdwmVcL" }, "outputs": [], "execution_count": 8 }, { "cell_type": "code", "source": [ "# ============================================================================\n", "# Step 3: Import to COLMAP\n", "# ============================================================================\n", "\n", "def import_to_colmap(image_dir, feature_dir, database_path, single_camera=True):\n", " \"\"\"\n", " Import features and matches to COLMAP database\n", "\n", " Args:\n", " image_dir (str): Directory containing the images.\n", " feature_dir (str): Directory to save/load extracted features.\n", " database_path (str): Path to the database file.\n", " single_camera (bool): Set to True if all images have the same dimensions (e.g., pre-resized).\n", " \"\"\"\n", " print(\"\\n=== Creating COLMAP Database ===\")\n", "\n", " if os.path.exists(database_path):\n", " os.remove(database_path)\n", " print(f\"✓ Removed existing database\")\n", "\n", " db = COLMAPDatabase.connect(database_path)\n", " db.create_tables()\n", "\n", " print(f\"Single camera mode: {single_camera}\")\n", "\n", " image_files = [f for f in os.listdir(image_dir)\n", " if f.lower().endswith(('.jpg', '.jpeg', '.png'))]\n", " if not image_files:\n", " raise ValueError(f\"No images found in {image_dir}\")\n", "\n", " first_image = sorted(image_files)[0]\n", " img_ext = os.path.splitext(first_image)[1]\n", " print(f\"Detected image extension: '{img_ext}'\")\n", "\n", " fname_to_id = add_keypoints(\n", " db,\n", " feature_dir,\n", " image_dir,\n", " img_ext,\n", " 'PINHOLE',\n", " single_camera=single_camera\n", " )\n", "\n", " add_matches(db, feature_dir, fname_to_id)\n", " db.commit()\n", " db.close()\n", "\n", " print(f\"✓ Database created: {database_path}\")\n", "\n", "# ============================================================================\n", "# Step 4: Run COLMAP Mapper\n", "# ============================================================================\n", "\n", 
"def run_colmap_mapper(database_path, image_dir, output_dir):\n", " \"\"\"\n", " Run COLMAP mapper with verbose output\n", " \"\"\"\n", " print(\"\\n=== Running COLMAP Reconstruction ===\")\n", " os.makedirs(output_dir, exist_ok=True)\n", " cmd = [\n", " 'colmap', 'mapper',\n", " '--database_path', database_path,\n", " '--image_path', image_dir,\n", " '--output_path', output_dir,\n", " '--Mapper.ba_refine_focal_length', '0',\n", " '--Mapper.ba_refine_principal_point', '0',\n", " '--Mapper.ba_refine_extra_params', '0',\n", " '--Mapper.min_num_matches', '15',\n", " '--Mapper.init_min_num_inliers', '50',\n", " '--Mapper.max_num_models', '1',\n", " '--Mapper.num_threads', '16',\n", " ]\n", " print(f\"Command: {' '.join(cmd)}\\n\")\n", "\n", " process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True)\n", " for line in process.stdout:\n", " print(line, end='')\n", " process.wait()\n", " if process.returncode == 0:\n", " model_dir = os.path.join(output_dir, '0')\n", " if os.path.exists(model_dir):\n", " print(f\"\\n✓ COLMAP reconstruction complete: {model_dir}\")\n", " return model_dir\n", " raise RuntimeError(\"COLMAP reconstruction failed\")" ], "metadata": { "id": "NJedFruCmVcL" }, "outputs": [], "execution_count": 9 }, { "cell_type": "code", "source": [ "# ============================================================================\n", "# Step 5: Convert to Gaussian Splatting Format (if needed)\n", "# ============================================================================\n", "\n", "def convert_to_gs_format(colmap_model_dir, output_dir):\n", " \"\"\"\n", " Verify COLMAP output and prepare paths for Gaussian Splatting.\n", "\n", " Args:\n", " colmap_model_dir (str): Path to the COLMAP sparse/0 directory.\n", " Example: /content/output/colmap/sparse/0\n", " output_dir (str): Base output directory.\n", "\n", " Returns:\n", " colmap_parent_dir (str): The path to be passed to Gaussian Splatting.\n", " Example: /content/output/colmap 
(Parent directory containing 'sparse/')\n", " \"\"\"\n", " print(\"\\n=== Verifying COLMAP Model for Gaussian Splatting ===\")\n", "\n", " import pycolmap\n", " reconstruction = pycolmap.Reconstruction(colmap_model_dir)\n", "\n", " print(f\"Registered images: {len(reconstruction.images)}\")\n", " print(f\"3D points: {len(reconstruction.points3D)}\")\n", "\n", " # Check for files required by Gaussian Splatting\n", " required_files = ['cameras.bin', 'images.bin', 'points3D.bin']\n", " for file in required_files:\n", " file_path = os.path.join(colmap_model_dir, file)\n", " if not os.path.exists(file_path):\n", " raise FileNotFoundError(f\"Required file not found: {file}\")\n", " print(f\" ✓ {file}\")\n", "\n", " # Return the grandparent directory of sparse/0\n", " # /content/output/colmap/sparse/0 -> /content/output/colmap\n", " colmap_parent_dir = os.path.dirname(os.path.dirname(colmap_model_dir))\n", "\n", " print(f\"\\n✓ COLMAP model ready for Gaussian Splatting\")\n", " print(f\" Source path: {colmap_parent_dir}\")\n", "\n", " return colmap_parent_dir" ], "metadata": { "id": "4IioqnC1mVcM" }, "outputs": [], "execution_count": 10 }, { "cell_type": "code", "source": [ "def train_gaussian_splatting(colmap_dir, image_dir, output_dir, iterations=30000):\n", " \"\"\"\n", " Train a Gaussian Splatting model.\n", "\n", " Args:\n", " colmap_dir (str): COLMAP parent directory (the directory containing 'sparse/').\n", " Example: /content/output/colmap\n", " image_dir (str): Directory containing training images.\n", " Example: /content/output/processed_images\n", " output_dir (str): Base directory for Gaussian Splatting output.\n", " iterations (int): Total number of training iterations.\n", "\n", " Returns:\n", " gs_output_dir (str): Path to the generated Gaussian Splatting output.\n", " \"\"\"\n", " print(\"\\n=== Training Gaussian Splatting ===\")\n", "\n", " gs_output_dir = os.path.join(output_dir, 'gs_output')\n", " os.makedirs(gs_output_dir, exist_ok=True)\n", "\n", " # 
Verify the Gaussian Splatting directory structure\n", " sparse_dir = os.path.join(colmap_dir, 'sparse', '0')\n", " if not os.path.exists(sparse_dir):\n", " raise FileNotFoundError(f\"COLMAP sparse directory not found: {sparse_dir}\")\n", "\n", " print(f\"COLMAP sparse model: {sparse_dir}\")\n", " print(f\"Training images: {image_dir}\")\n", " print(f\"Output: {gs_output_dir}\")\n", " print(f\"Iterations: {iterations}\")\n", "\n", " # Gaussian Splatting command\n", " cmd = [\n", " 'python', 'train.py',\n", " '-s', colmap_dir, # Source directory (must contain 'sparse/')\n", " '--images', image_dir, # Explicitly specify the images directory\n", " '-m', gs_output_dir, # Model output directory\n", " '--iterations', str(iterations),\n", " '--test_iterations', str(iterations//2), str(iterations),\n", " '--save_iterations', str(iterations//2), str(iterations),\n", " ]\n", "\n", " print(f\"\\nCommand: {' '.join(cmd)}\\n\")\n", "\n", " result = subprocess.run(cmd, capture_output=True, text=True)\n", "\n", " print(result.stdout)\n", " if result.stderr:\n", " print(\"STDERR:\", result.stderr)\n", "\n", " if result.returncode != 0:\n", " raise RuntimeError(\"Gaussian Splatting training failed\")\n", "\n", " # Check for the existence of the generated PLY file\n", " ply_path = os.path.join(gs_output_dir, 'point_cloud', f'iteration_{iterations}', 'point_cloud.ply')\n", " if os.path.exists(ply_path):\n", " size_mb = os.path.getsize(ply_path) / (1024 * 1024)\n", " print(f\"\\n✓ Training complete!\")\n", " print(f\" PLY file: {ply_path}\")\n", " print(f\" Size: {size_mb:.2f} MB\")\n", " else:\n", " print(f\"⚠️ Warning: PLY file not found at the expected location\")\n", "\n", " return gs_output_dir" ], "metadata": { "id": "EiHoRSfzQ01b" }, "execution_count": 11, "outputs": [] }, { "cell_type": "markdown", "source": [ "# **main**" ], "metadata": { "id": "IqNcsheVywit" } }, { "cell_type": "code", "source": [ "def main_pipeline(image_dir, output_dir, square_size=1024):\n", " \"\"\"\n", " 
Complete pipeline: Images → Square Processing → COLMAP → Gaussian Splatting\n", " \"\"\"\n", " print(\"=\"*70)\n", " print(\"Gaussian Splatting Preparation Pipeline\")\n", " print(\"=\"*70)\n", "\n", " # Step 0: Standardize images to square format\n", " #processed_dir = os.path.join(output_dir, 'processed_images')\n", " #processed_image_dir = preprocess_images_square(image_dir, processed_dir, size=square_size)\n", "\n", " processed_image_dir = os.path.join(output_dir, \"processed_images\")\n", "\n", " normalize_image_sizes_biplet(\n", " input_dir=image_dir,\n", " output_dir=processed_image_dir,\n", " size=square_size\n", ")\n", "\n", " # Setup paths\n", " feature_dir = os.path.join(output_dir, 'features')\n", " colmap_dir = os.path.join(output_dir, 'colmap')\n", " database_path = os.path.join(colmap_dir, 'database.db')\n", " sparse_dir = os.path.join(colmap_dir, 'sparse')\n", "\n", " os.makedirs(output_dir, exist_ok=True)\n", " os.makedirs(colmap_dir, exist_ok=True)\n", "\n", " # Get image paths\n", " image_paths = sorted([\n", " os.path.join(processed_image_dir, f)\n", " for f in os.listdir(processed_image_dir)\n", " if f.lower().endswith(('.jpg', '.jpeg', '.png'))\n", " ])\n", "\n", " print(f\"\\n📸 Found {len(image_paths)} images\")\n", "\n", " # Step 1: Generate image pairs\n", " pairs, features = get_image_pairs(image_paths)\n", "\n", " # Step 2: Feature matching with LightGlue\n", " match_pairs_lightglue(image_paths, pairs, features, feature_dir)\n", "\n", " # Step 3: Import data into COLMAP\n", " # (single_camera=True assumes uniform image dimensions)\n", " import_to_colmap(processed_image_dir, feature_dir, database_path, single_camera=True)\n", "\n", " # Step 4: Run COLMAP Sparse Reconstruction\n", " model_dir = run_colmap_mapper(database_path, processed_image_dir, sparse_dir)\n", "\n", " # Step 5: Verify and prepare for Gaussian Splatting\n", " colmap_parent = convert_to_gs_format(model_dir, output_dir)\n", "\n", " # Step 6: Train Gaussian Splatting 
model\n", " gs_output = train_gaussian_splatting(\n", " colmap_dir=colmap_parent,\n", " image_dir=processed_image_dir,\n", " output_dir=output_dir,\n", " iterations=6000\n", " )\n", "\n", " print(\"\\n\" + \"=\"*70)\n", " print(\"✅ Full Pipeline Successfully Completed!\")\n", " print(\"=\"*70)\n", " print(f\"\\nGaussian Splatting model saved at: {gs_output}\")\n", "\n", " return gs_output\n", "\n", "\n", "# Example usage\n", "if __name__ == \"__main__\":\n", " # Example: Tourist photos with varying resolutions/aspect ratios\n", " IMAGE_DIR = \"/content/drive/MyDrive/your_folder/grand_place\"\n", " OUTPUT_DIR = \"/content/output\"\n", "\n", " gs_output = main_pipeline(IMAGE_DIR, OUTPUT_DIR, square_size=1024)" ], "metadata": { "id": "5-_UvgTtRiC_", "colab": { "base_uri": "https://localhost:8080/", "height": 1000, "referenced_widgets": [ "e77d9a4ce8544f39a5c9811cba265aa8", "a02ef012cc8e4c0fa262a81234906301", "4a750e6c7bfa4209a1721acf99331a45", "ed674b75d05b433c85dd201b7013803d", "070af62ea35145c0b509dd4b5c7e8fd3", "ce4f381f840543d58933a043069ec9f8", "2cb3a9029a9746318667c5a4c76a7dab", "d6fe40a036834d919ac7246189e2c81f", "019e0283956e4f68ad22184f944cf3c6", "4a768e102dcd463aab632c5ce2b43f1a", "23774a4251de4cb09e1d56c7c5795d05", "fdfd59523e754effa4878c5680a9616d", "4b240715214f4c29822073350ac0369d", "14d21c9d1ff840deb4e1223ff847c99c", "f455f2bb8e804a15bcb45aa4f5148d7c", "c36753994d014f809a112247ca2a88d9", "5f2c9704ac4d415aa329fb31f34c916f", "537a257c672647af991dbadbfee89bd1", "1c8c3747d86f4d7b900b546bd3b32065", "7e00de6878a44473a7bc3ae8c386da6e", "ac48058fb6bb42afad0c59fe59c2e04a", "1bf0ebb06347462ebe5966c67f5acfd9", "5f072e098c0c45028553b98bf3e86935", "033b22f052d648d1a23c13a4ebe42c96", "74f6ceb0670d448795e19382d598bff0", "c1940dbf42f149bf8432f813de5a9514", "f3e807bad5224e11a63657ad2a409fb7", "2d77cbe89b3e43ccac8b060a19871587", "a93447dcb49d476fa7754ba734ea3170", "52c56de17c5a4e18a068007ea15d4fe4", "25ec7a1ca66a40b283456109f4d43a51", "1be18665ff41490894995b775e2d68c0", 
"46ed28c510e44663b8dd221bd64a749b" ] }, "outputId": "911be225-edbe-43de-f79b-a5c7080b3cca" }, "execution_count": null, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "======================================================================\n", "Gaussian Splatting Preparation Pipeline\n", "======================================================================\n", "Generating 2 cropped squares (Left/Right or Top/Bottom) for each image...\n", "\n", " ✓ 30060098_13881136865.jpg: (1045, 776) → 2 square images generated\n", " ✓ 30932426_9182415658.jpg: (1066, 693) → 2 square images generated\n", " ✓ 31608163_6102133633.jpg: (1013, 757) → 2 square images generated\n", " ✓ 32234104_183977071.jpg: (1056, 780) → 2 square images generated\n", " ✓ 54689750_8177752931.jpg: (780, 1056) → 2 square images generated\n", " ✓ 54789298_4426358141.jpg: (1041, 774) → 2 square images generated\n", " ✓ 54924303_8694631531.jpg: (1020, 680) → 2 square images generated\n", " ✓ 55042300_2998445423.jpg: (780, 1056) → 2 square images generated\n", " ✓ 56336675_12025095504.jpg: (1030, 693) → 2 square images generated\n", " ✓ 56571846_8147517032.jpg: (1039, 773) → 2 square images generated\n", " ✓ 56929508_8486048059.jpg: (1021, 765) → 2 square images generated\n", " ✓ 57895262_5823807439.jpg: (1072, 786) → 2 square images generated\n", " ✓ 57903036_6034663613.jpg: (1014, 758) → 2 square images generated\n", " ✓ 58370062_228935435.jpg: (1047, 776) → 2 square images generated\n", " ✓ 60409409_9335730523.jpg: (1032, 684) → 2 square images generated\n", " ✓ 61195081_2862626518.jpg: (506, 377) → 2 square images generated\n", " ✓ 61353083_128372808.jpg: (606, 816) → 2 square images generated\n", " ✓ 61442570_1877638298.jpg: (935, 1081) → 2 square images generated\n", " ✓ 61931752_11269229804.jpg: (374, 501) → 2 square images generated\n", " ✓ 62109065_5110187734.jpg: (1021, 769) → 2 square images generated\n", " ✓ 63023054_7575192460.jpg: (777, 1049) → 2 square images generated\n", 
" ✓ 63358653_3107448586.jpg: (1021, 683) → 2 square images generated\n", " ✓ 63732276_3020022441.jpg: (692, 1068) → 2 square images generated\n", " ✓ 63831138_11531850475.jpg: (1057, 689) → 2 square images generated\n", " ✓ 63879656_8828566618.jpg: (1026, 768) → 2 square images generated\n", " ✓ 64133734_1372277262.jpg: (1047, 688) → 2 square images generated\n", " ✓ 64177922_6939801959.jpg: (1065, 651) → 2 square images generated\n", " ✓ 64512286_144258707.jpg: (689, 1050) → 2 square images generated\n", " ✓ 64752693_9379071376.jpg: (760, 1027) → 2 square images generated\n", " ✓ 65596895_2998348333.jpg: (786, 1072) → 2 square images generated\n", " ✓ 65600629_2179631506.jpg: (1041, 774) → 2 square images generated\n", " ✓ 65723931_2797767702.jpg: (1061, 782) → 2 square images generated\n", " ✓ 66906958_589662361.jpg: (1053, 779) → 2 square images generated\n", " ✓ 67684501_389553533.jpg: (1020, 764) → 2 square images generated\n", " ✓ 68173000_3055082929.jpg: (719, 1052) → 2 square images generated\n", " ✓ 68388628_416361462.jpg: (1067, 784) → 2 square images generated\n", " ✓ 68664732_743450957.jpg: (778, 1052) → 2 square images generated\n", " ✓ 69151734_8072413114.jpg: (1021, 677) → 2 square images generated\n", " ✓ 69241373_2334905294.jpg: (1047, 776) → 2 square images generated\n", " ✓ 69675366_3578017198.jpg: (1057, 780) → 2 square images generated\n", " ✓ 70032246_6347641014.jpg: (117, 204) → 2 square images generated\n", " ✓ 70699652_8066487031.jpg: (204, 114) → 2 square images generated\n", " ✓ 71316394_5233978047.jpg: (1033, 771) → 2 square images generated\n", " ✓ 71521166_8226626351.jpg: (1013, 757) → 2 square images generated\n", " ✓ 72075679_2232424648.jpg: (1045, 776) → 2 square images generated\n", " ✓ 72598875_8072411042.jpg: (680, 1032) → 2 square images generated\n", " ✓ 72646474_2231631573.jpg: (1048, 777) → 2 square images generated\n", " ✓ 73543943_6819377449.jpg: (1064, 783) → 2 square images generated\n", " ✓ 74323184_6819378543.jpg: 
(1061, 782) → 2 square images generated\n", " ✓ 74379252_3524899450.jpg: (1066, 784) → 2 square images generated\n", " ✓ 74949128_744301420.jpg: (1055, 780) → 2 square images generated\n", " ✓ 75556369_8486047615.jpg: (756, 1009) → 2 square images generated\n", " ✓ 75670494_10532227934.jpg: (1045, 776) → 2 square images generated\n", " ✓ 75795010_1073028.jpg: (513, 380) → 2 square images generated\n", " ✓ 75963148_11531980813.jpg: (1062, 690) → 2 square images generated\n", " ✓ 76012163_5234572012.jpg: (1057, 780) → 2 square images generated\n", " ✓ 76033783_4998492930.jpg: (1075, 694) → 2 square images generated\n", " ✓ 76893682_2999450842.jpg: (1036, 772) → 2 square images generated\n", " ✓ 77082765_2334078741.jpg: (780, 1055) → 2 square images generated\n", " ✓ 77322980_4097421297.jpg: (775, 1044) → 2 square images generated\n", " ✓ 77502589_7155532159.jpg: (1010, 1010) → 2 square images generated\n", " ✓ 78461576_2179633616.jpg: (1044, 776) → 2 square images generated\n", " ✓ 78756975_2797764270.jpg: (1060, 782) → 2 square images generated\n", " ✓ 80159334_3432556478.jpg: (610, 822) → 2 square images generated\n", " ✓ 80238669_11531890454.jpg: (1026, 680) → 2 square images generated\n", " ✓ 80394863_159173947.jpg: (1048, 777) → 2 square images generated\n", " ✓ 80419518_5109586935.jpg: (1026, 772) → 2 square images generated\n", " ✓ 80486775_7794448262.jpg: (1007, 756) → 2 square images generated\n", " ✓ 80637823_2178834461.jpg: (773, 1039) → 2 square images generated\n", " ✓ 82494139_2231488841.jpg: (1040, 774) → 2 square images generated\n", " ✓ 82619588_2231634419.jpg: (1041, 774) → 2 square images generated\n", " ✓ 83049944_6012584465.jpg: (689, 1047) → 2 square images generated\n", " ✓ 83295590_2999194388.jpg: (1061, 782) → 2 square images generated\n", " ✓ 83393115_2713666377.jpg: (1057, 691) → 2 square images generated\n", " ✓ 83494299_9338552580.jpg: (1032, 684) → 2 square images generated\n", " ✓ 83683619_338844451.jpg: (1060, 782) → 2 square images 
generated\n", " ✓ 83880039_8704930508.jpg: (1065, 696) → 2 square images generated\n", " ✓ 84280780_9149810125.jpg: (698, 1070) → 2 square images generated\n", " ✓ 84416532_1372248960.jpg: (1047, 689) → 2 square images generated\n", " ✓ 85875165_3423168615.jpg: (1009, 670) → 2 square images generated\n", " ✓ 87021942_9387960095.jpg: (692, 1060) → 2 square images generated\n", " ✓ 87218542_4659693663.jpg: (1051, 690) → 2 square images generated\n", " ✓ 87247686_240007777.jpg: (1056, 780) → 2 square images generated\n", " ✓ 87692684_217109210.jpg: (613, 454) → 2 square images generated\n", " ✓ 88392746_217109207.jpg: (612, 454) → 2 square images generated\n", " ✓ 89270148_8703811409.jpg: (1064, 696) → 2 square images generated\n", " ✓ 89428328_415853343.jpg: (1041, 774) → 2 square images generated\n", " ✓ 89821764_2714509360.jpg: (768, 1026) → 2 square images generated\n", " ✓ 89912815_3055922954.jpg: (1021, 543) → 2 square images generated\n", " ✓ 90349736_2179635862.jpg: (1038, 773) → 2 square images generated\n", " ✓ 90437703_2231631681.jpg: (776, 1046) → 2 square images generated\n", " ✓ 90505934_1674425132.jpg: (781, 595) → 2 square images generated\n", " ✓ 90635649_2575304688.jpg: (1033, 683) → 2 square images generated\n", " ✓ 90975523_4819480082.jpg: (1070, 698) → 2 square images generated\n", " ✓ 91086090_6251487936.jpg: (774, 1041) → 2 square images generated\n", " ✓ 91147360_7119433063.jpg: (756, 1012) → 2 square images generated\n", " ✓ 91223537_687114481.jpg: (696, 1078) → 2 square images generated\n", " ✓ 91887585_2717484563.jpg: (1024, 628) → 2 square images generated\n", " ✓ 91966016_2575756133.jpg: (1054, 779) → 2 square images generated\n", " ✓ 92092453_6346876777.jpg: (644, 1094) → 2 square images generated\n", " ✓ 92332702_2575760403.jpg: (1056, 780) → 2 square images generated\n", " ✓ 93176024_7794453938.jpg: (975, 563) → 2 square images generated\n", " ✓ 93452424_2796919411.jpg: (783, 1064) → 2 square images generated\n", " ✓ 
93855352_2178846945.jpg: (1047, 777) → 2 square images generated\n", " ✓ 94589793_5911133174.jpg: (971, 643) → 2 square images generated\n", " ✓ 94608068_9152036176.jpg: (1029, 686) → 2 square images generated\n", " ✓ 95763587_2232280810.jpg: (1043, 775) → 2 square images generated\n", " ✓ 95912681_1074872458.jpg: (1199, 556) → 2 square images generated\n", " ✓ 96196686_8643079931.jpg: (1035, 772) → 2 square images generated\n", " ✓ 96969581_4970715984.jpg: (1004, 1004) → 2 square images generated\n", " ✓ 97044761_5284916914.jpg: (689, 1047) → 2 square images generated\n", " ✓ 97169342_4819482004.jpg: (1020, 683) → 2 square images generated\n", " ✓ 97408314_4660309480.jpg: (1052, 778) → 2 square images generated\n", " ✓ 97679107_216044726.jpg: (1028, 769) → 2 square images generated\n", " ✓ 98096144_7465571668.jpg: (1035, 1035) → 2 square images generated\n", " ✓ 98217251_4097423171.jpg: (1032, 770) → 2 square images generated\n", " ✓ 98347882_247481604.jpg: (1057, 691) → 2 square images generated\n", " ✓ 98365344_176581376.jpg: (1049, 777) → 2 square images generated\n", " ✓ 98413296_389554202.jpg: (1059, 781) → 2 square images generated\n", " ✓ 99829338_590024774.jpg: (1055, 780) → 2 square images generated\n", "\n", "Processing complete: 120 source images processed\n", "Original size distribution: {'1045x776': 3, '1066x693': 1, '1013x757': 2, '1056x780': 3, '780x1056': 2, '1041x774': 4, '1020x680': 1, '1030x693': 1, '1039x773': 1, '1021x765': 1, '1072x786': 1, '1014x758': 1, '1047x776': 2, '1032x684': 2, '506x377': 1, '606x816': 1, '935x1081': 1, '374x501': 1, '1021x769': 1, '777x1049': 1, '1021x683': 1, '692x1068': 1, '1057x689': 1, '1026x768': 1, '1047x688': 1, '1065x651': 1, '689x1050': 1, '760x1027': 1, '786x1072': 1, '1061x782': 3, '1053x779': 1, '1020x764': 1, '719x1052': 1, '1067x784': 1, '778x1052': 1, '1021x677': 1, '1057x780': 2, '117x204': 1, '204x114': 1, '1033x771': 1, '680x1032': 1, '1048x777': 2, '1064x783': 1, '1066x784': 1, '1055x780': 2, 
'756x1009': 1, '513x380': 1, '1062x690': 1, '1075x694': 1, '1036x772': 1, '780x1055': 1, '775x1044': 1, '1010x1010': 1, '1044x776': 1, '1060x782': 2, '610x822': 1, '1026x680': 1, '1026x772': 1, '1007x756': 1, '773x1039': 1, '1040x774': 1, '689x1047': 2, '1057x691': 2, '1065x696': 1, '698x1070': 1, '1047x689': 1, '1009x670': 1, '692x1060': 1, '1051x690': 1, '613x454': 1, '612x454': 1, '1064x696': 1, '768x1026': 1, '1021x543': 1, '1038x773': 1, '776x1046': 1, '781x595': 1, '1033x683': 1, '1070x698': 1, '774x1041': 1, '756x1012': 1, '696x1078': 1, '1024x628': 1, '1054x779': 1, '644x1094': 1, '975x563': 1, '783x1064': 1, '1047x777': 1, '971x643': 1, '1029x686': 1, '1043x775': 1, '1199x556': 1, '1035x772': 1, '1004x1004': 1, '1020x683': 1, '1052x778': 1, '1028x769': 1, '1035x1035': 1, '1032x770': 1, '1049x777': 1, '1059x781': 1}\n", "\n", "📸 Found 240 images\n", "\n", "=== Extracting DINO Global Features ===\n" ] }, { "output_type": "stream", "name": "stderr", "text": [ "/usr/local/lib/python3.12/dist-packages/huggingface_hub/file_download.py:942: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.\n", " warnings.warn(\n", "/usr/local/lib/python3.12/dist-packages/huggingface_hub/utils/_auth.py:94: UserWarning: \n", "The secret `HF_TOKEN` does not exist in your Colab secrets.\n", "To authenticate with the Hugging Face Hub, create a token in your settings tab (https://huggingface.co/settings/tokens), set it as secret in your Google Colab and restart your session.\n", "You will be able to reuse this secret in all of your notebooks.\n", "Please note that authentication is recommended but still optional to access public models or datasets.\n", " warnings.warn(\n" ] }, { "output_type": "display_data", "data": { "text/plain": [ "preprocessor_config.json: 0%| | 0.00/436 [00:00