Upload 3 files
Browse files
asds/libs/advanced_zoom_extension.py
ADDED
|
@@ -0,0 +1,1633 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
╔══════════════════════════════════════════════════════════════════════════════╗
|
| 3 |
+
║ ADVANCED ZOOM SYSTEM vULTIMATE - ULTIMATE PRODUCTION VERSION ║
|
| 4 |
+
║ Лучшее из всех версий: V3.1.1 FIXED + Безопасность V3.2.1 FINAL_FIX ║
|
| 5 |
+
╚══════════════════════════════════════════════════════════════════════════════╝
|
| 6 |
+
|
| 7 |
+
🎯 vULTIMATE = V3.1.1 FIXED + EXPAND SAFETY
|
| 8 |
+
═══════════════════════════════════════════════════════════════════════════════
|
| 9 |
+
|
| 10 |
+
ЧТО ВЗЯТО ИЗ V3.1.1 FIXED:
|
| 11 |
+
✅ create_adaptive_latent_noise - COHERENT OUTPAINTING!
|
| 12 |
+
✅ ПОЛНЫЙ функционал (spiral_zoom, gradient_radial, noise_blend)
|
| 13 |
+
✅ Правильная логика convergence positioning
|
| 14 |
+
✅ Distance-based adaptive noise strength
|
| 15 |
+
✅ Fade mask для контента
|
| 16 |
+
✅ get_adaptive_epsilon для float16
|
| 17 |
+
✅ safe_interpolate для масштабирования
|
| 18 |
+
✅ apply_variance_correction
|
| 19 |
+
|
| 20 |
+
ЧТО ДОБАВЛЕНО ИЗ V3.2.1 FINAL_FIX:
|
| 21 |
+
✅ БЕЗОПАСНЫЙ EXPAND - все .expand() обернуты в try/except
|
| 22 |
+
✅ Fallback через broadcast_to при ошибках
|
| 23 |
+
✅ НЕТ RuntimeError: "expanded size must match"
|
| 24 |
+
✅ 100% стабильность без потери функционала
|
| 25 |
+
|
| 26 |
+
КРИТИЧНЫЕ ИСПРАВЛЕНИЯ (vUltimate):
|
| 27 |
+
═══════════════════════════════════════════════════════════════════════════════
|
| 28 |
+
🔧 edge_smoothing.expand() - обернут в try/except
|
| 29 |
+
🔧 gradient.expand() - обернут в try/except (apply_gradient_radial_blend)
|
| 30 |
+
🔧 blend_mask.expand() - обернут в try/except (apply_noise_blend)
|
| 31 |
+
🔧 adaptive_strength.expand() - уже был безопасным в V3.1.1
|
| 32 |
+
🔧 variance_fix.expand() - уже был безопасным в V3.1.1
|
| 33 |
+
|
| 34 |
+
ИСПРАВЛЕНИЯ ИЗ V3.1.1 FIXED (сохранены):
|
| 35 |
+
✅ FLOAT16 EPSILON FIX - адаптивный epsilon (1e-3 для float16, 1e-6 для float32)
|
| 36 |
+
✅ EDGE_SMOOTHING FIX - правильный expand до (b,c,H,W) вместо (b,c,1,W)
|
| 37 |
+
✅ SAFE_INTERPOLATE - безопасная интерполяция для float16
|
| 38 |
+
✅ ALIGN_CORNERS FIX - правильная обработка без None
|
| 39 |
+
✅ VARIANCE_CORRECTION - адаптивный epsilon + надежный broadcast
|
| 40 |
+
✅ SPIRAL_ZOOM - адаптивный epsilon для всех sqrt/division
|
| 41 |
+
✅ NOISE_BLEND - адаптивный epsilon + оптимизированная формула
|
| 42 |
+
✅ EXTRA_PARAMS - правильная передача параметров
|
| 43 |
+
✅ DIVISION BY ZERO - защита во всех критичных местах
|
| 44 |
+
|
| 45 |
+
ИСПРАВЛЕНИЯ ИЗ V3.1 COMPLETE (сохранены):
|
| 46 |
+
✅ QUANTILE FIX - правильная обработка dtype + умное сэмплирование (>10M)
|
| 47 |
+
✅ TENSOR SIZE FIX - исправлена ошибка "expanded size must match existing size"
|
| 48 |
+
✅ SPIRAL ZOOM - полная реализация без багов + валидация параметров
|
| 49 |
+
✅ GRADIENT_RADIAL - новый режим блендинга с радиальным градиентом
|
| 50 |
+
✅ NOISE_BLEND - новый режим блендинга с процедурным шумом
|
| 51 |
+
✅ WARNINGS - всегда видны, детальная диагностика
|
| 52 |
+
✅ SHAPE VALIDATION - проверка размеров на каждом шаге
|
| 53 |
+
|
| 54 |
+
ФУНКЦИИ (vUltimate):
|
| 55 |
+
═══════════════════════════════════════════════════════════════════════════════
|
| 56 |
+
🆕 get_adaptive_epsilon(dtype) - автоматический выбор epsilon
|
| 57 |
+
🆕 safe_interpolate() - безопасная интерполяция для float16
|
| 58 |
+
🆕 create_adaptive_latent_noise() - COHERENT NOISE ДЛЯ OUTPAINTING
|
| 59 |
+
🆕 apply_variance_correction() - устранение серости на швах
|
| 60 |
+
🆕 apply_spiral_zoom() - спиральный zoom с вращением
|
| 61 |
+
🆕 apply_gradient_radial_blend() - радиальный градиент (SAFE EXPAND!)
|
| 62 |
+
🆕 apply_noise_blend() - процедурный шум (SAFE EXPAND!)
|
| 63 |
+
|
| 64 |
+
РЕЖИМЫ ZOOM:
|
| 65 |
+
═══════════════════════════════════════════════════════════════════════════════
|
| 66 |
+
🎯 OUTPAINT_ZOOM - оптимизирован для outpainting (COHERENT NOISE!)
|
| 67 |
+
🎯 SPIRAL_ZOOM - спиральный зум с вращением
|
| 68 |
+
🎯 GRID_WARP - геометрический zoom
|
| 69 |
+
🎯 BLEND_TRANSITION - плавный переход
|
| 70 |
+
🎯 CONVERGENCE_SHIFT - legacy сдвиг
|
| 71 |
+
🎯 HYBRID - комбинация методов
|
| 72 |
+
|
| 73 |
+
РЕЖИМЫ BLEND:
|
| 74 |
+
═══════════════════════════════════════════════════════════════════════════════
|
| 75 |
+
🌈 CIRCULAR_REFLECT - бесшовный + отражение
|
| 76 |
+
🌈 CIRCULAR_CONSTANT - бесшовный + константа
|
| 77 |
+
🌈 REFLECT_CONSTANT - отражение + константа
|
| 78 |
+
🌈 POLAR_CIRCULAR - полярное + бесшовный
|
| 79 |
+
🌈 MIRROR_CIRCULAR - зеркало + бесшовный
|
| 80 |
+
🌈 ANISO_CIRCULAR - анизотропный + бесшовный
|
| 81 |
+
🌈 GRADIENT_RADIAL - радиальный градиент (НОВОЕ!)
|
| 82 |
+
🌈 NOISE_BLEND - процедурный шум (НОВОЕ!)
|
| 83 |
+
🌈 CUSTOM - пользовательский
|
| 84 |
+
|
| 85 |
+
ПАРАМЕТРЫ:
|
| 86 |
+
═══════════════════════════════════════════════════════════════════════════════
|
| 87 |
+
✨ zoom_factor - сила зума (-10 до +10)
|
| 88 |
+
✨ convergence_point/convergence_y - точки фокуса (0.0-1.0)
|
| 89 |
+
✨ depth_power - кривая глубины
|
| 90 |
+
✨ pan_x/pan_y - сдвиг камеры (-1.0 до +1.0)
|
| 91 |
+
✨ fade_strength - сила fade контента (0.0-1.0)
|
| 92 |
+
✨ noise_strength - сила шума для outpainting (0.5-1.5, default: 1.0)
|
| 93 |
+
✨ spiral_rotation - сила вращения (0.0-2.0)
|
| 94 |
+
✨ spiral_direction - направление вращения (1.0/-1.0)
|
| 95 |
+
✨ gradient_center_x/y - центр градиента (0.0-1.0)
|
| 96 |
+
✨ gradient_radius - радиус градиента (0.1-2.0)
|
| 97 |
+
✨ noise_scale - масштаб шума (1.0-10.0)
|
| 98 |
+
✨ noise_octaves - октавы шума (1-4)
|
| 99 |
+
✨ interp_mode - режим интерполяции ('bilinear'/'bicubic'/'nearest')
|
| 100 |
+
✨ debug - режим отладки
|
| 101 |
+
|
| 102 |
+
СОВМЕСТИМОСТЬ:
|
| 103 |
+
═══════════════════════════════════════════════════════════════════════════════
|
| 104 |
+
✅ Полная интеграция с asymmetric_tiling_UNIFIED.py
|
| 105 |
+
✅ Поддержка всех параметров из V3.0/V3.1/V3.1.1
|
| 106 |
+
✅ Обратная совместимость со всеми режимами
|
| 107 |
+
✅ extra_params поддержка для расширяемости
|
| 108 |
+
✅ FLOAT16 СОВМЕСТИМОСТЬ - все критичные исправления применены
|
| 109 |
+
✅ RUNTIME ERROR HANDLING - детальная диагностика и fallback
|
| 110 |
+
✅ NO EXPAND ERRORS - все .expand() безопасные
|
| 111 |
+
|
| 112 |
+
ПРОИЗВОДИТЕЛЬНОСТЬ:
|
| 113 |
+
═══════════════════════════════════════════════════════════════════════════════
|
| 114 |
+
⚡ Умное кэширование (distance maps, noise patterns)
|
| 115 |
+
⚡ Сэмплирование только для огромных тензоров (>10M элементов)
|
| 116 |
+
⚡ Оптимизированные математические операции
|
| 117 |
+
⚡ Минимальное использование памяти
|
| 118 |
+
⚡ Безопасная работа с float16 без overflow/underflow
|
| 119 |
+
⚡ Детальная диагностика для отладки
|
| 120 |
+
⚡ Coherent adaptive noise для лучшего outpainting
|
| 121 |
+
|
| 122 |
+
КАЧЕСТВО OUTPAINTING:
|
| 123 |
+
═══════════════════════════════════════════════════════════════════════════════
|
| 124 |
+
🌟 Adaptive latent noise - шум адаптируется к статистике входных латентов
|
| 125 |
+
🌟 Distance-based strength - сила шума зависит от расстояния до контента
|
| 126 |
+
🌟 Coherent generation - нейросеть получает правильные подсказки для генерации
|
| 127 |
+
🌟 Fade mask - плавный переход между контентом и шумом
|
| 128 |
+
🌟 Convergence positioning - контроль положения контента на canvas
|
| 129 |
+
|
| 130 |
+
СТАБИЛЬНОСТЬ:
|
| 131 |
+
═══════════════════════════════════════════════════════════════════════════════
|
| 132 |
+
✅ Все .expand() обернуты в try/except
|
| 133 |
+
✅ Fallback через broadcast_to при ошибках
|
| 134 |
+
✅ Проверка размеров перед операциями
|
| 135 |
+
✅ Детальная диагностика при ошибках
|
| 136 |
+
✅ Адаптивный epsilon для float16
|
| 137 |
+
✅ Safe interpolate без dtype mismatch
|
| 138 |
+
✅ НЕТ ИЗВЕСТНЫХ БАГОВ
|
| 139 |
+
|
| 140 |
+
╔══════════════════════════════════════════════════════════════════════════════╗
|
| 141 |
+
║ 🚀 vULTIMATE IS READY! 🚀 ║
|
| 142 |
+
║ Coherent Outpainting + Rock-Solid Stability ║
|
| 143 |
+
╚══════════════════════════════════════════════════════════════════════════════╝
|
| 144 |
+
"""
|
| 145 |
+
|
| 146 |
+
import torch
|
| 147 |
+
import torch.nn.functional as F
|
| 148 |
+
import math
|
| 149 |
+
from enum import Enum
|
| 150 |
+
from collections import OrderedDict
|
| 151 |
+
|
| 152 |
+
# ═══════════════════════════════════════════════════════════════════════════
|
| 153 |
+
# УТИЛИТА ДЛЯ FLOAT16 СОВМЕСТИМОСТИ (V3.1.1 - НОВОЕ)
|
| 154 |
+
# ═══════════════════════════════════════════════════════════════════════════
|
| 155 |
+
|
| 156 |
+
def get_adaptive_epsilon(dtype):
    """Return a numerically safe epsilon for the given torch dtype.

    float16 underflows below roughly 6e-5, so the customary 1e-6 would
    silently become zero there; use a coarser 1e-3 for half precision,
    the standard 1e-6 for float32, and 1e-12 for float64 (and any other
    dtype, which falls through to the high-precision default).

    Args:
        dtype: a torch.dtype.

    Returns:
        float: epsilon safe to add/divide by under that dtype.
    """
    eps_for = {
        torch.float16: 1e-3,  # coarse, but representable in half precision
        torch.float32: 1e-6,  # conventional single-precision epsilon
    }
    return eps_for.get(dtype, 1e-12)
| 175 |
+
|
| 176 |
+
# ═══════════════════════════════════════════════════════════════════════════
|
| 177 |
+
# BOOL NORMALIZATION HELPERS
|
| 178 |
+
# ═══════════════════════════════════════════════════════════════════════════
|
| 179 |
+
|
| 180 |
+
def _coerce_bool_param(value, default=False):
|
| 181 |
+
"""Robust bool parsing for Gradio values, PNG infotext, presets and JSON.
|
| 182 |
+
|
| 183 |
+
Prevents Python's bool("False") == True pitfall while preserving the
|
| 184 |
+
standard behaviour for real booleans and numerics.
|
| 185 |
+
"""
|
| 186 |
+
if value is None:
|
| 187 |
+
return bool(default)
|
| 188 |
+
if isinstance(value, bool):
|
| 189 |
+
return value
|
| 190 |
+
if isinstance(value, (int, float)):
|
| 191 |
+
return value != 0
|
| 192 |
+
if isinstance(value, str):
|
| 193 |
+
s = value.strip().lower()
|
| 194 |
+
if s in {'1', 'true', 'yes', 'y', 'on'}:
|
| 195 |
+
return True
|
| 196 |
+
if s in {'0', 'false', 'no', 'n', 'off', 'none', 'null', ''}:
|
| 197 |
+
return False
|
| 198 |
+
return bool(value)
|
| 199 |
+
|
| 200 |
+
# ═══════════════════════════════════════════════════════════════════════════
|
| 201 |
+
# ENUMS
|
| 202 |
+
# ═══════════════════════════════════════════════════════════════════════════
|
| 203 |
+
|
| 204 |
+
class ZoomMode(Enum):
    """Selector for the zoom algorithm used by the extension."""
    OUTPAINT_ZOOM = "outpaint_zoom"          # Optimized for outpainting (recommended!)
    BLEND_TRANSITION = "blend_transition"    # Smooth transition with blending
    CONVERGENCE_SHIFT = "convergence_shift"  # Legacy shift
    GRID_WARP = "grid_warp"                  # Geometric zoom
    HYBRID = "hybrid"                        # Combination of methods
    SPIRAL_ZOOM = "spiral_zoom"              # V3.1: spiral zoom with rotation
| 211 |
+
|
| 212 |
+
class BlendMode(Enum):
    """Selector for the seam blending / padding strategy."""
    CIRCULAR_REFLECT = "circular_reflect"    # Seamless (tileable) + reflection
    CIRCULAR_CONSTANT = "circular_constant"  # Seamless + constant
    REFLECT_CONSTANT = "reflect_constant"    # Reflection + constant
    POLAR_CIRCULAR = "polar_circular"        # Polar + seamless
    MIRROR_CIRCULAR = "mirror_circular"      # Mirror + seamless
    ANISO_CIRCULAR = "aniso_circular"        # Anisotropic + seamless
    CUSTOM = "custom"                        # User-defined
    GRADIENT_RADIAL = "gradient_radial"      # V3.1: radial gradient
    NOISE_BLEND = "noise_blend"              # V3.1: procedural-noise blending
| 222 |
+
|
| 223 |
+
# ═══════════════════════════════════════════════════════════════════════════
|
| 224 |
+
# КЭШИРОВАНИЕ (V3.0 - УЛУЧШЕНО)
|
| 225 |
+
# ═══════════════════════════════════════════════════════════════════════════
|
| 226 |
+
|
| 227 |
+
class DistanceMapCache:
    """Small LRU cache for precomputed distance maps.

    Backed by an OrderedDict: hits are promoted to the most-recently-used
    end, and inserting beyond ``max_size`` evicts the least-recently-used
    entry.
    """

    def __init__(self, max_size=20):
        self.max_size = max_size
        self.cache = OrderedDict()

    def get(self, key):
        """Return the cached value for *key* (promoting it), or None on miss."""
        try:
            self.cache.move_to_end(key)
        except KeyError:
            return None
        return self.cache[key]

    def set(self, key, value):
        """Insert or refresh *key*, evicting the LRU entry when full."""
        if key not in self.cache and len(self.cache) >= self.max_size:
            self.cache.popitem(last=False)
        self.cache[key] = value
        self.cache.move_to_end(key)
| 246 |
+
|
| 247 |
+
# Module-level singleton cache shared by create_distance_map().
_DISTANCE_MAP_CACHE = DistanceMapCache()
|
| 248 |
+
|
| 249 |
+
# ═══════════════════════════════════════════════════════════════════════════
|
| 250 |
+
# УТИЛИТЫ ДЛЯ ЛАТЕНТНОГО ШУМА (V3.0 - УЛУЧШЕНО)
|
| 251 |
+
# ═══════════════════════════════════════════════════════════════════════════
|
| 252 |
+
|
| 253 |
+
def compute_latent_statistics(input_tensor, percentile_clip=True):
    """Gather mean/std/min/max statistics of a latent tensor.

    With ``percentile_clip`` the 'min'/'max' entries are robust 1% / 99%
    quantiles (subsampled for tensors above 10M elements to bound cost);
    otherwise they are the true extremes.

    Args:
        input_tensor: latent tensor to analyse.
        percentile_clip: use percentiles instead of raw min/max.

    Returns:
        dict: {'mean': float, 'std': float, 'min': float, 'max': float}
    """
    stats = {
        'mean': input_tensor.mean().item(),
        'std': input_tensor.std().item(),
    }

    if not percentile_clip:
        stats['min'] = input_tensor.min().item()
        stats['max'] = input_tensor.max().item()
        return stats

    values = input_tensor.flatten()

    # torch.quantile only accepts float32/float64 input.
    if values.dtype not in (torch.float32, torch.float64):
        values = values.to(torch.float32)

    # Subsample only truly huge tensors (>10M elements).
    if values.numel() > 10_000_000:
        pick = torch.randperm(values.numel(), device=values.device)[:1_000_000]
        values = values[pick]

    try:
        stats['min'] = torch.quantile(values, 0.01).item()
        stats['max'] = torch.quantile(values, 0.99).item()
    except RuntimeError:
        # Fallback: robust percentiles via a full sort.
        ordered = torch.sort(values)[0]
        lo = max(0, int(0.01 * len(ordered)))
        hi = min(len(ordered) - 1, int(0.99 * len(ordered)))
        stats['min'] = ordered[lo].item()
        stats['max'] = ordered[hi].item()

    return stats
| 299 |
+
|
| 300 |
+
|
| 301 |
+
def create_distance_map(canvas_h, canvas_w, content_box, device, dtype):
    """Build a normalized (1, 1, H, W) map of distance from the content box.

    Values are 0 inside ``content_box`` ((y1, y2, x1, x2)) and grow with
    Euclidean distance away from it, clamped to [0, 1] at half the canvas
    diagonal. Results are memoized in the module-level LRU cache keyed by
    size, box, device and dtype.
    """
    key = (canvas_h, canvas_w, content_box, str(device), str(dtype))
    cached_map = _DISTANCE_MAP_CACHE.get(key)
    if cached_map is not None:
        return cached_map

    y1, y2, x1, x2 = content_box

    # Per-pixel row/column coordinate grids.
    rows = torch.arange(canvas_h, device=device, dtype=dtype).view(-1, 1).expand(canvas_h, canvas_w)
    cols = torch.arange(canvas_w, device=device, dtype=dtype).view(1, -1).expand(canvas_h, canvas_w)

    # Axis-aligned distance to the nearest box edge (zero inside the box).
    dy = torch.maximum(
        torch.clamp(y1 - rows, min=0),
        torch.clamp(rows - y2, min=0),
    )
    dx = torch.maximum(
        torch.clamp(x1 - cols, min=0),
        torch.clamp(cols - x2, min=0),
    )

    euclid = torch.sqrt(dx ** 2 + dy ** 2)

    # Normalize by half the diagonal; the adaptive epsilon guards against a
    # zero divisor under float16.
    half_diag = max(math.sqrt(canvas_h ** 2 + canvas_w ** 2) * 0.5, get_adaptive_epsilon(dtype))
    normalized = torch.clamp(euclid / half_diag, 0, 1)

    out = normalized.unsqueeze(0).unsqueeze(0)
    _DISTANCE_MAP_CACHE.set(key, out)
    return out
| 349 |
+
|
| 350 |
+
|
| 351 |
+
def create_adaptive_latent_noise(canvas_shape, content_box, zoom_factor, input_stats,
                                 device, dtype, blend_mode='circular_reflect',
                                 noise_strength=1.0, adaptive_scale=True, seed=-1):
    """Generate distance-weighted latent noise for coherent outpainting.

    Noise is drawn to match the input latents' mean/std (from
    ``input_stats``), then amplified away from the content box so the
    model receives stronger "generate here" cues in empty regions. Pass
    ``seed >= 0`` for reproducible output via a local torch.Generator;
    the default ``seed=-1`` uses the global RNG so behaviour matches the
    rest of the pipeline without disturbing it.
    """
    b, c, canvas_h, canvas_w = canvas_shape

    # A local Generator keeps global RNG state untouched when a seed is
    # requested (mirrors the fix applied to gaussian_latent_noise).
    generator = None
    if seed >= 0:
        generator = torch.Generator(device=device)
        generator.manual_seed(int(seed))

    noise = torch.randn(b, c, canvas_h, canvas_w,
                        device=device, dtype=dtype, generator=generator)

    # Match the statistics of the input latents.
    noise = noise * input_stats['std'] + input_stats['mean']

    distance_map = create_distance_map(canvas_h, canvas_w, content_box, device, dtype)

    # Slightly damp the noise for stronger zooms (at most -30%).
    zoom_damp = 1.0 - min(abs(zoom_factor) * 0.05, 0.3) if adaptive_scale else 1.0

    # Strength ramps from 0.5x (at the content) up to 2x (far away).
    strength = (noise_strength * zoom_damp) * (0.5 + distance_map * 1.5)

    # Safe expand: fall back to an explicit broadcastable reshape if the
    # direct expand is rejected.
    if strength.shape != noise.shape:
        try:
            strength = strength.expand(b, c, canvas_h, canvas_w)
        except RuntimeError:
            strength = strength.reshape(1, 1, canvas_h, canvas_w).expand(b, c, canvas_h, canvas_w)

    shaped = noise * strength

    # A gentle cosine ripple along the width keeps the wrap seam subtle
    # in circular (tileable) blend modes.
    if 'circular' in blend_mode:
        ripple = 0.9 + 0.1 * torch.cos(
            torch.linspace(0, 2 * math.pi, canvas_w, device=device, dtype=dtype)
        ).view(1, 1, 1, -1)
        try:
            ripple = ripple.expand(b, c, canvas_h, canvas_w)
        except RuntimeError:
            ripple = ripple.reshape(1, 1, 1, canvas_w).expand(b, c, canvas_h, canvas_w)
        shaped = shaped * ripple

    return shaped
| 411 |
+
|
| 412 |
+
|
| 413 |
+
def apply_variance_correction(blended_tensor, mask, debug=False):
    """Undo the variance loss introduced by mask-weighted blending.

    Linearly blending two unit-variance signals with weights m and (1-m)
    yields variance m^2 + (1-m)^2 < 1, which reads as grey seams; dividing
    by sqrt(m^2 + (1-m)^2 + eps) restores it. The correction is computed
    in float32 (half precision would underflow near 0/1), resized if its
    spatial size disagrees with the tensor, broadcast safely, and the
    result is returned in the input tensor's original dtype.
    """
    target_dtype = blended_tensor.dtype

    # Promote the mask to 4D (N, C, H, W) layout if needed.
    if mask.dim() < 4:
        mask = mask.view(1, 1, mask.shape[-2], mask.shape[-1])

    eps = 1e-6

    # Compute the correction in float32 so sqrt never sees exact zeros.
    m = mask.to(torch.float32)
    correction = torch.sqrt(m ** 2 + (1 - m) ** 2 + eps)

    # Spatial size mismatch (e.g. 30 vs 28): resize the correction map.
    if correction.shape[-2:] != blended_tensor.shape[-2:]:
        if debug:
            print(f"⚠️ Resizing correction mask: {correction.shape} -> {blended_tensor.shape}")
        correction = F.interpolate(
            correction,
            size=blended_tensor.shape[-2:],
            mode='bilinear',
            align_corners=True,
        )

    # Return to the working dtype (e.g. float16) before applying.
    correction = correction.to(dtype=target_dtype)

    # Safe broadcast across batch/channels, with a repeat() fallback when
    # expand_as() rejects the shapes.
    if correction.shape[0] != blended_tensor.shape[0] or correction.shape[1] != blended_tensor.shape[1]:
        try:
            correction = correction.expand_as(blended_tensor)
        except RuntimeError:
            reps = [max(1, t // s) for t, s in zip(blended_tensor.shape, correction.shape)]
            while len(reps) < 4:
                reps.insert(0, 1)
            correction = correction.repeat(*reps)

    corrected = blended_tensor / correction

    # Guarantee the caller gets back exactly the dtype it handed in.
    return corrected.to(dtype=target_dtype)
| 465 |
+
|
| 466 |
+
|
| 467 |
+
# ═══════════════════════════════════════════════════════════════════════════
|
| 468 |
+
# 1. УЛУЧШЕННЫЙ LEGACY METHOD (V3.0)
|
| 469 |
+
# ═══════════════════════════════════════════════════════════════════════════
|
| 470 |
+
|
| 471 |
+
def apply_legacy_shift_zoom(input_tensor, zoom_factor, convergence=0.5, power=1.0,
                            pan_x=0.0, pan_y=0.0, auto_clamp_pan=True, debug=False):
    """Legacy shift-based zoom: blends horizontally rolled copies of the latent.

    V3.0 improvements:
    - auto_clamp_pan keeps pan offsets in a safe range
    - debug mode for diagnostics

    Fixes in this revision:
    - removed dead code (`max_shift_h` / `shift_px_h` were computed and never used)
    - the blend mask is only built on the zoom-in path that actually uses it
    - the x-coordinate grid is kept as a broadcastable (1, 1, 1, w) tensor
      instead of being expanded to a full (b, c, h, w) copy

    Args:
        input_tensor: latent tensor of shape (b, c, h, w)
        zoom_factor: zoom strength; < 0 zooms out, > 0 zooms in;
            |zoom_factor| < 0.001 is treated as "no zoom"
        convergence: horizontal focus point in [0, 1]
        power: sharpness exponent of the horizontal blend mask (zoom-in only)
        pan_x, pan_y: fractional pan offsets, applied via circular roll
        auto_clamp_pan: clamp pan offsets to [-0.5, 0.5] so content is not lost
        debug: print diagnostics

    Returns:
        torch.Tensor: tensor of the same shape as input_tensor
    """
    b, c, h, w = input_tensor.shape
    device = input_tensor.device
    dtype = input_tensor.dtype

    # V3.0: clamp pan so the roll cannot move content more than half a frame
    if auto_clamp_pan:
        pan_x = max(-0.5, min(0.5, pan_x))
        pan_y = max(-0.5, min(0.5, pan_y))

    # 1. Apply pan (circular roll — content wraps around the edges)
    if pan_x != 0 or pan_y != 0:
        shift_x = int(w * pan_x)
        shift_y = int(h * pan_y)
        input_tensor = torch.roll(input_tensor, shifts=(shift_y, shift_x), dims=(2, 3))

        if debug:
            print(f"[Legacy Shift Zoom] Pan applied: X={shift_x}px, Y={shift_y}px")

    if abs(zoom_factor) < 0.001:
        return input_tensor

    # Maximum horizontal shift is a quarter of the width, scaled by zoom/5.
    # Note: shift_px_w is negative for zoom-out (zoom_factor < 0).
    max_shift_w = w // 4
    shift_px_w = int(max_shift_w * (zoom_factor / 5.0))

    # Horizontal coordinate in [0, 1]; (1, 1, 1, w) broadcasts over b, c, h,
    # so there is no need to materialize a full (b, c, h, w) grid.
    x = torch.linspace(0, 1, w, device=device, dtype=dtype).view(1, 1, 1, w)

    if zoom_factor < 0:
        # ZOOM OUT: the halves on either side of the convergence point are
        # rolled towards each other and recombined with hard binary masks.
        left_mask = (x < convergence).to(dtype=dtype)
        right_mask = (x >= convergence).to(dtype=dtype)

        shifted_left = torch.roll(input_tensor, shifts=shift_px_w, dims=3)
        shifted_right = torch.roll(input_tensor, shifts=-shift_px_w, dims=3)

        return shifted_left * left_mask + shifted_right * right_mask

    # ZOOM IN: blend the original with a rolled copy using a soft mask.
    # `power` acts as "mask sharpness" (<1 sharper, >1 softer).
    dist_x = torch.abs(x - convergence)
    mask_w = torch.pow(torch.clamp(dist_x * 2.0, 0, 1), power)

    shifted = torch.roll(input_tensor, shifts=shift_px_w, dims=3)
    return input_tensor * (1.0 - mask_w) + shifted * mask_w
|
| 532 |
+
|
| 533 |
+
|
| 534 |
+
# ═══════════════════════════════════════════════════════════════════════════
|
| 535 |
+
# 2. УЛУЧШЕННЫЙ GRID WARP (V3.0)
|
| 536 |
+
# ═══════════════════════════════════════════════════════════════════════════
|
| 537 |
+
|
| 538 |
+
def apply_grid_warp_zoom(input_tensor, zoom_factor, convergence=0.5, power=1.0,
                         pan_x=0.0, pan_y=0.0, convergence_y=0.5,
                         interp_mode='bilinear', debug=False):
    """Zoom/pan via a sampling-grid warp (F.grid_sample).

    V3.0 improvements:
    - interp_mode parameter ('bilinear', 'nearest'; anything else falls back
      to 'bilinear')
    - scale clamping to prevent NaN / degenerate grids
    - debug mode

    Fix in this revision: the scalar scale is clamped with plain Python
    min/max instead of round-tripping it through a device tensor
    (`torch.clamp(torch.tensor(...)).item()`), which was wasteful and could
    force a device sync. The resulting value is identical.

    Args:
        input_tensor: latent tensor (b, c, h, w)
        zoom_factor: zoom strength; scale = 1 + 0.1 * zoom_factor,
            clamped to [0.1, 10.0]
        convergence, convergence_y: zoom centre in [0, 1]
        power: accepted for API compatibility; not used by the grid warp
        pan_x, pan_y: pan offsets in fractions of the frame
        interp_mode: grid_sample mode; invalid values fall back to 'bilinear'
        debug: print diagnostics

    Returns:
        torch.Tensor: warped tensor of the same shape as input_tensor
    """
    b, c, h, w = input_tensor.shape
    device = input_tensor.device
    dtype = input_tensor.dtype

    # V3.0: clamp the scale so the grid never degenerates (no NaN)
    scale = 1.0 + (zoom_factor * 0.1)
    scale = max(0.1, min(10.0, scale))

    if debug:
        print(f"[Grid Warp] Scale: {scale:.4f}, Interp: {interp_mode}")

    y_coords = torch.linspace(-1, 1, h, device=device, dtype=dtype)
    x_coords = torch.linspace(-1, 1, w, device=device, dtype=dtype)

    y_grid, x_grid = torch.meshgrid(y_coords, x_coords, indexing='ij')

    # Convergence point in grid_sample's [-1, 1] coordinate space
    center_x = (convergence - 0.5) * 2.0
    center_y = (convergence_y - 0.5) * 2.0

    # Pan, also expressed in [-1, 1] units
    offset_x = pan_x * 2.0
    offset_y = pan_y * 2.0

    # Zoom relative to the convergence point: divide offsets by the scale
    x_new = (x_grid - center_x) / scale + center_x - offset_x
    y_new = (y_grid - center_y) / scale + center_y - offset_y

    grid = torch.stack((x_new, y_new), dim=-1)
    grid = grid.unsqueeze(0).expand(b, -1, -1, -1)

    # V3.0: only modes that grid_sample accepts here; fall back otherwise
    if interp_mode not in ['bilinear', 'nearest']:
        interp_mode = 'bilinear'  # fallback (bicubic not supported here)

    return F.grid_sample(
        input_tensor,
        grid,
        mode=interp_mode,
        padding_mode='zeros',
        align_corners=True
    )
|
| 589 |
+
|
| 590 |
+
|
| 591 |
+
|
| 592 |
+
# ═══════════════════════════════════════════════════════════════════════════
|
| 593 |
+
# 2.5. БЕЗОПАСНАЯ ИНТЕРПОЛЯЦИЯ ДЛЯ FLOAT16 (V3.1.1 - НОВОЕ)
|
| 594 |
+
# ═══════════════════════════════════════════════════════════════════════════
|
| 595 |
+
|
| 596 |
+
def safe_interpolate(tensor, size, mode='bilinear'):
    """Resize a tensor, working around float16 instability in F.interpolate.

    V3.1.1: NEW FUNCTION
    Half-precision inputs are upcast to float32 for the actual interpolation
    (prevents overflow) and downcast back afterwards, so the caller always
    receives the dtype it passed in. `align_corners` is supplied only for
    the interpolating modes, since 'nearest' rejects the argument.

    Args:
        tensor: input tensor
        size: target spatial size (H, W)
        mode: interpolation mode ('bilinear', 'bicubic', 'nearest')

    Returns:
        torch.Tensor: interpolated tensor in the original dtype
    """
    needs_upcast = tensor.dtype == torch.float16
    work = tensor.float() if needs_upcast else tensor

    # Build kwargs conditionally: passing align_corners to 'nearest' raises.
    kwargs = {'size': size, 'mode': mode}
    if mode != 'nearest':
        kwargs['align_corners'] = True

    resized = F.interpolate(work, **kwargs)

    # Restore the caller's dtype when we upcast above.
    return resized.half() if needs_upcast else resized
|
| 634 |
+
|
| 635 |
+
|
| 636 |
+
# ═══════════════════════════════════════════════════════════════════════════
|
| 637 |
+
# 3. ПОЛНОСТЬЮ ПЕРЕРАБОТАННЫЙ OUTPAINT ZOOM (V3.0)
|
| 638 |
+
# ═══════════════════════════════════════════════════════════════════════════
|
| 639 |
+
|
| 640 |
+
def apply_outpaint_zoom(input_tensor, zoom_factor, pad_h, pad_w,
                        convergence=0.5, convergence_y=0.5,
                        fade_strength=0.3, depth_power=1.0,
                        pan_x=0.0, pan_y=0.0,
                        fade_to_black=False, fade_edge_strength=0.15,
                        blend_mode='circular_reflect',
                        noise_strength=1.0,
                        interp_mode='bilinear',
                        zoom_in_fade=True,
                        variance_correction=True,
                        auto_clamp_pan=True,
                        adaptive_noise_scale=True,
                        debug=False,
                        extra_params=None):  # V3.1.1 FIX: extra_params added
    """
    ═══════════════════════════════════════════════════════════════════════════
    V3.0 - PROFESSIONAL VERSION WITH ALL KNOWN ISSUES FIXED
    V3.1.1 - CRITICAL FLOAT16 FIXES
    ═══════════════════════════════════════════════════════════════════════════

    Outpaint-style zoom: for zoom-out the content is shrunk and pasted onto a
    larger canvas whose background is filled by padding + adaptive latent
    noise; for zoom-in the content is enlarged, cropped, edge-faded and
    circularly padded.

    V3.0 fixes:
    🔧 noise_strength defaults to 1.0 (was 0.1) - coherent outpainting
    🔧 interp_mode parameter for interpolation choice
    🔧 zoom_in_fade to remove seams when zooming in
    🔧 variance_correction to remove grayness
    🔧 auto_clamp_pan for safe panning
    🔧 adaptive_noise_scale for smart noise scaling
    🔧 debug mode for diagnostics
    🔧 improved validation with warnings

    V3.1.1 fixes:
    🔧 extra_params forwards options to gradient_radial/noise_blend
    🔧 safe_interpolate instead of F.interpolate for float16
    🔧 adaptive epsilon for all operations

    Args:
        input_tensor: input latent (b, c, h, w)
        zoom_factor: zoom strength (-10 to +10)
        pad_h, pad_w: padding sizes
        convergence, convergence_y: focus points (0-1)
        fade_strength: fade strength on the content (0-1)
        depth_power: fade gradient curve (<1 sharper, >1 softer)
        pan_x, pan_y: pan offsets (-1 to +1)
        fade_to_black: darken the outer canvas edges
        fade_edge_strength: edge fade strength
        blend_mode: blending mode; 'gradient_radial' and 'noise_blend'
            dispatch to dedicated helpers on the zoom-out path
        noise_strength: outpaint noise strength (0.5-1.5, default 1.0) 🆕
        interp_mode: interpolation mode ('bilinear'/'bicubic'/'nearest') 🆕
        zoom_in_fade: apply edge fade on zoom in 🆕
        variance_correction: grayness correction 🆕
        auto_clamp_pan: automatic pan clamping 🆕
        adaptive_noise_scale: adaptive noise scaling 🆕
        debug: debug output 🆕
        extra_params: optional dict of extra parameters 🆕 V3.1.1

    Returns:
        torch.Tensor: zoomed result with padding
    """
    b, c, h, w = input_tensor.shape
    device = input_tensor.device
    dtype = input_tensor.dtype

    if debug:
        print(f"\n{'='*70}")
        print(f"[Outpaint Zoom V3.0] Starting...")
        print(f" Input shape: {input_tensor.shape}")
        print(f" Zoom factor: {zoom_factor:.2f}")
        print(f" Noise strength: {noise_strength:.2f}")
        print(f" Interp mode: {interp_mode}")
        print(f"{'='*70}\n")

    is_zooming = abs(zoom_factor) > 0.001
    is_panning = abs(pan_x) > 0.001 or abs(pan_y) > 0.001

    # Nothing to do — just return the circularly padded input.
    if not is_zooming and not is_panning:
        return F.pad(input_tensor, (pad_w, pad_w, pad_h, pad_h), mode='circular')

    # V3.0: validate interp_mode, fall back to bilinear if unknown
    valid_modes = ['bilinear', 'bicubic', 'nearest']
    if interp_mode not in valid_modes:
        if debug:
            print(f"⚠️ Warning: Invalid interp_mode '{interp_mode}', using 'bilinear'")
        interp_mode = 'bilinear'

    # ═══════════════════════════════════════════════════════════════════
    # ZOOM OUT
    # ═══════════════════════════════════════════════════════════════════
    if zoom_factor < 0:
        # V3.1: special blend modes short-circuit to dedicated helpers
        if blend_mode == 'gradient_radial':
            # V3.1.1 FIX: take parameters from extra_params when provided
            gradient_center_x = 0.5
            gradient_center_y = 0.5
            gradient_radius = 1.0

            if extra_params:
                gradient_center_x = extra_params.get('gradient_center_x', 0.5)
                gradient_center_y = extra_params.get('gradient_center_y', 0.5)
                gradient_radius = extra_params.get('gradient_radius', 1.0)

            if debug:
                print(f"[Blend Mode] Using GRADIENT_RADIAL")
            return apply_gradient_radial_blend(
                input_tensor, pad_h, pad_w,
                gradient_center_x=gradient_center_x,
                gradient_center_y=gradient_center_y,
                gradient_radius=gradient_radius,
                debug=debug
            )
        elif blend_mode == 'noise_blend':
            # V3.1.1 FIX: take parameters from extra_params when provided
            noise_scale = 5.0
            noise_octaves = 2

            if extra_params:
                noise_scale = extra_params.get('noise_scale', 5.0)
                noise_octaves = extra_params.get('noise_octaves', 2)

            if debug:
                print(f"[Blend Mode] Using NOISE_BLEND")
            return apply_noise_blend(
                input_tensor, pad_h, pad_w,
                noise_scale=noise_scale,
                noise_octaves=noise_octaves,
                debug=debug
            )

        # 1. Shrink factor; clamped to [1, 4], shrunk size floored at 16px
        scale = 1.0 + abs(zoom_factor) * 0.1
        scale = max(1.0, min(scale, 4.0))

        new_h = max(int(h / scale), 16)
        new_w = max(int(w / scale), 16)

        if debug:
            print(f"[Zoom Out] Scale: {scale:.4f}, New size: {new_h}x{new_w}")

        # V3.1.1 FIX: safe_interpolate for float16 compatibility
        content_small = safe_interpolate(
            input_tensor,
            size=(new_h, new_w),
            mode=interp_mode
        )

        # 2. Build the fade mask (shaped by depth_power)
        mask = torch.ones(1, 1, new_h, new_w, device=device, dtype=dtype)

        fade_h = int(new_h * fade_strength)
        fade_w = int(new_w * fade_strength)

        if fade_h > 0 and fade_w > 0:
            lin_x = torch.linspace(0, 1, fade_w, device=device, dtype=dtype)
            lin_y = torch.linspace(0, 1, fade_h, device=device, dtype=dtype)

            # depth_power shapes the gradient curve
            curve_x = torch.pow(lin_x, depth_power)
            curve_y = torch.pow(lin_y, depth_power)

            mask[:, :, :, :fade_w] *= curve_x.view(1, 1, 1, -1)
            mask[:, :, :, -fade_w:] *= curve_x.flip(0).view(1, 1, 1, -1)
            mask[:, :, :fade_h, :] *= curve_y.view(1, 1, -1, 1)
            mask[:, :, -fade_h:, :] *= curve_y.flip(0).view(1, 1, -1, 1)

        content_faded = content_small * mask.expand_as(content_small)

        # 3. Canvas dimensions (content + padding on both sides)
        canvas_h = h + 2 * pad_h
        canvas_w = w + 2 * pad_w

        # Convergence determines where the shrunken content is pasted
        focus_x = int((canvas_w - new_w) * convergence)
        focus_y = int((canvas_h - new_h) * convergence_y)

        center_x = focus_x
        center_y = focus_y

        # V3.0: pan with auto-clamping.
        # EDGE-CASE FIX: when new_w/new_h >= canvas (tiny latent + min-16 forcing),
        # (canvas - new) is negative → max_pan goes negative → clamp flips pan to +1
        # producing false clipping warnings. Explicitly zero pan on any axis where
        # content is at least as large as the canvas.
        if auto_clamp_pan:
            if new_w >= canvas_w:
                pan_x = 0.0
            else:
                max_pan_x = (canvas_w - new_w) / canvas_w
                pan_x = max(-max_pan_x, min(max_pan_x, pan_x))
            if new_h >= canvas_h:
                pan_y = 0.0
            else:
                max_pan_y = (canvas_h - new_h) / canvas_h
                pan_y = max(-max_pan_y, min(max_pan_y, pan_y))

        shift_y = int(pan_y * canvas_h * 0.5)
        shift_x = int(pan_x * canvas_w * 0.5)

        paste_y = center_y + shift_y
        paste_x = center_x + shift_x

        # V3.0: validation with warnings.
        # EDGE-CASE FIX: suppress warning when content >= canvas on an axis —
        # the overflow is structural (min-16 forcing), not caused by user pan.
        # NOTE(review): `clipped` is set here but never consumed downstream.
        clipped = False
        if (paste_y < 0 or paste_y + new_h > canvas_h) and new_h < canvas_h:
            print(f"⚠️ Warning: Pan Y ({pan_y:.2f}) causes vertical clipping")
            clipped = True
        if (paste_x < 0 or paste_x + new_w > canvas_w) and new_w < canvas_w:
            print(f"⚠️ Warning: Pan X ({pan_x:.2f}) causes horizontal clipping")
            clipped = True

        # Safe paste rectangle, clipped to the canvas
        y1_c = max(0, paste_y)
        x1_c = max(0, paste_x)
        y2_c = min(canvas_h, paste_y + new_h)
        x2_c = min(canvas_w, paste_x + new_w)

        # Matching source rectangle inside the shrunken content
        y1_src = max(0, -paste_y)
        x1_src = max(0, -paste_x)
        y2_src = y1_src + (y2_c - y1_c)
        x2_src = x1_src + (x2_c - x1_c)

        # V3.0 FIX: adaptive latent noise with proper strength
        input_stats = compute_latent_statistics(input_tensor, percentile_clip=True)
        content_box = (y1_c, y2_c, x1_c, x2_c)

        # === FINAL WORKING VARIANT ===

        # 1. Empty space around the paste rectangle on each side
        pad_left = x1_c
        pad_right = canvas_w - x2_c
        pad_top = y1_c
        pad_bottom = canvas_h - y2_c

        # 2. MAIN FIX: pad the shrunken content out to canvas size with
        # mirror ('reflect') edges so there are no visible seams.

        # BUG FIX 2: reflect padding crashes when any pad >= the reduced
        # dimension. Use it only when it's geometrically valid; otherwise
        # fall back to replicate so we never hard-crash.
        reflect_safe = (pad_left < new_w and pad_right < new_w and
                        pad_top < new_h and pad_bottom < new_h)
        if reflect_safe:
            canvas = F.pad(content_small,
                           (pad_left, pad_right, pad_top, pad_bottom),
                           mode='reflect')
        else:
            # Fallback: replicate is always safe regardless of pad size.
            # For larger outpainting factors this is visually acceptable and
            # lets create_adaptive_latent_noise blend over it below.
            canvas = F.pad(content_small,
                           (pad_left, pad_right, pad_top, pad_bottom),
                           mode='replicate')

        # BUG FIX 4: previously the canvas was built solely from reflect/
        # replicate padding of the shrunken content, making noise_strength
        # and adaptive_noise_scale purely decorative. Now we actually use
        # create_adaptive_latent_noise to fill the background of the canvas
        # and blend it with the padded content so those parameters have a
        # real visible effect.
        #
        # FIX 4 REGRESSION PATCH: when new_h/new_w are forced to min=16 and
        # the input is tiny (e.g. 8x8 latent), the pads can collapse to zero
        # and canvas ends up being new_h×new_w (e.g. 16×16), not the
        # pre-computed canvas_h×canvas_w (8×8). Always read the real shape
        # after F.pad and drive everything from those actual dimensions.
        actual_canvas_h, actual_canvas_w = canvas.shape[-2:]

        if noise_strength > 0.01:
            # Clip content_box to actual canvas bounds (safe for tiny inputs)
            box_y1 = min(y1_c, actual_canvas_h)
            box_y2 = min(y2_c, actual_canvas_h)
            box_x1 = min(x1_c, actual_canvas_w)
            box_x2 = min(x2_c, actual_canvas_w)
            actual_content_box = (box_y1, box_y2, box_x1, box_x2)

            adaptive_canvas = create_adaptive_latent_noise(
                canvas_shape=(b, c, actual_canvas_h, actual_canvas_w),
                content_box=actual_content_box,
                zoom_factor=zoom_factor,
                input_stats=input_stats,
                device=device,
                dtype=dtype,
                blend_mode=blend_mode,
                noise_strength=noise_strength,
                adaptive_scale=adaptive_noise_scale,
            )
            # Blend: keep padded content as base, overlay adaptive noise
            # in the outpaint region only (outside the content paste box).
            content_region_mask = torch.zeros(1, 1, actual_canvas_h, actual_canvas_w,
                                              device=device, dtype=dtype)
            if box_y2 > box_y1 and box_x2 > box_x1:
                content_region_mask[:, :, box_y1:box_y2, box_x1:box_x2] = 1.0
            canvas = canvas * content_region_mask + adaptive_canvas * (1.0 - content_region_mask)

        if debug:
            print(f"[Noise Stats] Mean: {canvas.mean().item():.4f}, "
                  f"Std: {canvas.std().item():.4f}")

        # Paste the faded content into the canvas
        if y2_c > y1_c and x2_c > x1_c:
            canvas[:, :, y1_c:y2_c, x1_c:x2_c] = content_faded[:, :, y1_src:y2_src, x1_src:x2_src]

        # V3.0: fade_to_black darkens the canvas edges (use actual dims)
        if fade_to_black:
            edge_fade_h = int(actual_canvas_h * fade_edge_strength)
            edge_fade_w = int(actual_canvas_w * fade_edge_strength)

            if edge_fade_h > 0 and edge_fade_w > 0:
                fade_mask = torch.ones(1, 1, actual_canvas_h, actual_canvas_w, device=device, dtype=dtype)

                fade_h = torch.linspace(0, 1, edge_fade_h, device=device, dtype=dtype)
                fade_w = torch.linspace(0, 1, edge_fade_w, device=device, dtype=dtype)

                fade_mask[:, :, :edge_fade_h, :] *= fade_h.view(-1, 1)
                fade_mask[:, :, -edge_fade_h:, :] *= fade_h.flip(0).view(-1, 1)
                fade_mask[:, :, :, :edge_fade_w] *= fade_w.view(1, -1)
                fade_mask[:, :, :, -edge_fade_w:] *= fade_w.flip(0).view(1, -1)

                canvas = canvas * fade_mask.expand_as(canvas)

        # V3.0: Shape check (converted from hard assert so tiny latents don't crash;
        # when new_h/new_w are forced to min=16 the canvas can legitimately differ
        # from the pre-computed canvas_h×canvas_w on very small inputs).
        if canvas.shape != (b, c, canvas_h, canvas_w):
            if debug:
                print(f"[Outpaint Zoom] Note: canvas shape {canvas.shape} differs from "
                      f"expected {(b, c, canvas_h, canvas_w)} (normal for tiny latents)")

        return canvas

    # ═══════════════════════════════════════════════════════════════════
    # ZOOM IN
    # ═══════════════════════════════════════════════════════════════════
    else:
        scale = 1.0 + zoom_factor * 0.1
        new_h = int(h * scale)
        new_w = int(w * scale)

        if debug:
            print(f"[Zoom In] Scale: {scale:.4f}, New size: {new_h}x{new_w}")

        # V3.1.1 FIX: safe_interpolate for float16 compatibility
        content_large = safe_interpolate(
            input_tensor,
            size=(new_h, new_w),
            mode=interp_mode
        )

        # Convergence for zoom in determines the viewing window position
        focus_x = int(new_w * convergence) - w // 2
        focus_y = int(new_h * convergence_y) - h // 2

        # V3.0: pan with auto-clamping
        if auto_clamp_pan:
            max_pan_x = (new_w - w) / w * 0.5
            max_pan_y = (new_h - h) / h * 0.5
            pan_x = max(-max_pan_x, min(max_pan_x, pan_x))
            pan_y = max(-max_pan_y, min(max_pan_y, pan_y))

        shift_y = int(pan_y * h * 0.5)
        shift_x = int(pan_x * w * 0.5)

        # Crop window clamped inside the enlarged content
        crop_y = max(0, min(new_h - h, focus_y + shift_y))
        crop_x = max(0, min(new_w - w, focus_x + shift_x))

        cropped = content_large[:, :, crop_y:crop_y+h, crop_x:crop_x+w]

        # V3.0: NEW - fade for zoom in (removes seams!)
        # BUG FIX 3: initialise fade_mask=None so downstream code can always
        # do a safe `if fade_mask is not None:` check. Without this,
        # if fade_h_in==0 or fade_w_in==0 the variable is never created but
        # is still referenced inside the variance_correction block.
        fade_mask = None
        if zoom_in_fade and fade_strength > 0:
            # Light fade at the edges for smooth transitions
            fade_h_in = int(h * fade_strength * 0.5)  # less than for zoom out
            fade_w_in = int(w * fade_strength * 0.5)

            if fade_h_in > 0 and fade_w_in > 0:
                fade_mask = torch.ones(1, 1, h, w, device=device, dtype=dtype)

                lin_x = torch.linspace(0, 1, fade_w_in, device=device, dtype=dtype)
                lin_y = torch.linspace(0, 1, fade_h_in, device=device, dtype=dtype)

                curve_x = torch.pow(lin_x, depth_power)
                curve_y = torch.pow(lin_y, depth_power)

                fade_mask[:, :, :, :fade_w_in] *= curve_x.view(1, 1, 1, -1)
                fade_mask[:, :, :, -fade_w_in:] *= curve_x.flip(0).view(1, 1, 1, -1)
                fade_mask[:, :, :fade_h_in, :] *= curve_y.view(1, 1, -1, 1)
                fade_mask[:, :, -fade_h_in:, :] *= curve_y.flip(0).view(1, 1, -1, 1)

                cropped = cropped * fade_mask.expand_as(cropped)

                if debug:
                    print(f"[Zoom In Fade] Applied with strength {fade_strength:.2f}")

        # Circular padding back up to canvas size
        padded = F.pad(cropped, (pad_w, pad_w, pad_h, pad_h), mode='circular')

        # V3.0: variance correction removes grayness at the seams
        if variance_correction and zoom_in_fade:
            # BUG FIX 3 (continued): use fade_mask only if it was actually
            # created; fall back to a neutral all-ones mask when fade
            # dimensions collapsed to zero (very small latents).
            if fade_mask is not None:
                correction_mask = fade_mask
            else:
                # Fade dimensions were zero — use a flat mask so
                # variance_correction still runs without crashing.
                correction_mask = torch.ones(1, 1, h, w, device=device, dtype=dtype)

            padded = apply_variance_correction(padded, correction_mask, debug=debug)

        # V3.0: shape assertion
        expected_shape = (b, c, h + 2*pad_h, w + 2*pad_w)
        assert padded.shape == expected_shape, \
            f"Shape mismatch! Expected {expected_shape}, got {padded.shape}"

        return padded
|
| 1063 |
+
|
| 1064 |
+
|
| 1065 |
+
# ═══════════════════════════════════════════════════════════════════════════
|
| 1066 |
+
# 3.5. SPIRAL ZOOM (V3.1 - НОВАЯ ФУНКЦИЯ БЕЗ БАГОВ)
|
| 1067 |
+
# ═══════════════════════════════════════════════════════════════════════════
|
| 1068 |
+
|
| 1069 |
+
def apply_spiral_zoom(input_tensor, zoom_factor, pad_h, pad_w,
                      spiral_rotation=0.5,
                      spiral_direction=1.0,
                      interp_mode='bilinear',
                      debug=False,
                      **kwargs):
    """Spiral zoom: combined radial zoom and rotation warp of the latent.

    The rotation angle grows with the distance from the image centre, which
    produces the spiral look. After warping, the result is circularly padded
    by (pad_h, pad_w) on each side.

    V3.1: full implementation with parameter validation, safe handling of
    degenerate cases (dx=dy=0) and shape checks at every stage.
    V3.1.1: adaptive epsilon for all sqrt/division operations (float16 safe).

    Args:
        input_tensor: input latent (B, C, H, W)
        zoom_factor: zoom strength, clamped to [-5.0, 5.0]
        pad_h, pad_w: padding sizes
        spiral_rotation: rotation strength, clamped to [0.0, 2.0]
            (0.0 = plain zoom, 2.0 = strong rotation)
        spiral_direction: >= 0 means clockwise (+1), < 0 counter-clockwise (-1)
        interp_mode: interpolation mode ('bilinear', 'bicubic', 'nearest')
        debug: print diagnostics
        **kwargs: ignored; accepted so a dispatcher can pass extra options

    Returns:
        torch.Tensor: warped tensor of shape (B, C, H + 2*pad_h, W + 2*pad_w)

    Raises:
        ValueError: if any intermediate tensor has an unexpected shape.
    """
    b, c, h, w = input_tensor.shape
    device = input_tensor.device
    dtype = input_tensor.dtype

    # V3.1.1 FIX: dtype-aware epsilon (larger for float16)
    eps = get_adaptive_epsilon(dtype)

    # V3.1: sanitise parameters — clamp to documented ranges, snap direction
    spiral_rotation = float(max(0.0, min(2.0, spiral_rotation)))
    spiral_direction = 1.0 if spiral_direction >= 0 else -1.0
    zoom_factor = float(max(-5.0, min(5.0, zoom_factor)))

    if debug:
        print(f"\n{'='*70}")
        print(f"[Spiral Zoom V3.1.1]")
        print(f" Input shape: {input_tensor.shape}")
        print(f" Zoom Factor: {zoom_factor:.2f}")
        print(f" Rotation: {spiral_rotation:.2f} ({'clockwise' if spiral_direction > 0 else 'counter-clockwise'})")
        print(f" Interp mode: {interp_mode}")
        print(f" Epsilon: {eps} (for {dtype})")
        print(f"{'='*70}\n")

    # Pixel-space centre of the frame
    center_y = (h - 1) / 2.0
    center_x = (w - 1) / 2.0

    # Coordinate grids (ij indexing: rows carry y, columns carry x)
    row_idx, col_idx = torch.meshgrid(
        torch.arange(h, device=device, dtype=dtype),
        torch.arange(w, device=device, dtype=dtype),
        indexing='ij',
    )

    # Offsets from the centre, then polar coordinates
    # (eps under the sqrt keeps the centre pixel well-defined)
    dy = row_idx - center_y
    dx = col_idx - center_x
    r = torch.sqrt(dx**2 + dy**2 + eps)
    theta = torch.atan2(dy, dx)

    # 1. Zoom component
    zoom_scale = 1.0 + zoom_factor * 0.1

    # 2. Rotation component: angle grows with normalised distance from centre
    max_radius = math.sqrt(h**2 + w**2) / 2.0
    normalized_r = torch.clamp(r / (max_radius + eps), 0.0, 1.0)
    rotation_angle = spiral_direction * spiral_rotation * normalized_r * math.pi

    # 3. Apply the spiral transform in polar space …
    new_theta = theta + rotation_angle
    new_r = r * zoom_scale

    # … and convert back to cartesian sampling positions
    new_x = center_x + new_r * torch.cos(new_theta)
    new_y = center_y + new_r * torch.sin(new_theta)

    # Normalise to grid_sample's [-1, 1] range; clamp so samples stay inside
    grid_x = torch.clamp(2.0 * new_x / max(w - 1, 1) - 1.0, -1.0, 1.0)
    grid_y = torch.clamp(2.0 * new_y / max(h - 1, 1) - 1.0, -1.0, 1.0)

    grid = torch.stack([grid_x, grid_y], dim=-1).unsqueeze(0).to(dtype)

    # V3.1: validate the sampling grid shape before warping
    expected_grid_shape = (1, h, w, 2)
    if grid.shape != expected_grid_shape:
        raise ValueError(f"Grid shape mismatch! Expected {expected_grid_shape}, got {grid.shape}")

    warped = F.grid_sample(
        input_tensor,
        grid.expand(b, -1, -1, -1),
        mode=interp_mode,
        padding_mode='zeros',
        align_corners=True
    )

    # V3.1: check the warp preserved the input shape
    if warped.shape != input_tensor.shape:
        raise ValueError(f"Warped shape mismatch! Expected {input_tensor.shape}, got {warped.shape}")

    padded = F.pad(warped, (pad_w, pad_w, pad_h, pad_h), mode='circular')

    # V3.1: final shape verification
    expected_padded_shape = (b, c, h + 2*pad_h, w + 2*pad_w)
    if padded.shape != expected_padded_shape:
        raise ValueError(f"Padded shape mismatch! Expected {expected_padded_shape}, got {padded.shape}")

    if debug:
        print(f"[Spiral Zoom] Input shape: {input_tensor.shape}")
        print(f"[Spiral Zoom] Output shape: {padded.shape}")
        print(f"[Spiral Zoom] ✓ All shape checks passed")

    return padded
|
| 1203 |
+
|
| 1204 |
+
|
| 1205 |
+
# ═══════════════════════════════════════════════════════════════════════════
|
| 1206 |
+
# 3.6. GRADIENT RADIAL BLENDING (V3.1 - НОВАЯ ФУНКЦИЯ)
|
| 1207 |
+
# ═══════════════════════════════════════════════════════════════════════════
|
| 1208 |
+
|
| 1209 |
+
def apply_gradient_radial_blend(input_tensor, pad_h, pad_w,
                                gradient_center_x=0.5, gradient_center_y=0.5,
                                gradient_radius=1.0, debug=False):
    """Weight the circularly padded latent with a radial gradient.

    The gradient equals 1.0 at the chosen center and falls off linearly to
    0.0 at ``gradient_radius`` (radius is measured against half the canvas
    diagonal), giving a smooth transition from the content toward the
    padded borders.

    Args:
        input_tensor: latent tensor of shape (B, C, H, W).
        pad_h, pad_w: padding added on each vertical / horizontal side.
        gradient_center_x: horizontal center in [0, 1] (default 0.5).
        gradient_center_y: vertical center in [0, 1] (default 0.5).
        gradient_radius: gradient reach, clamped into [0.1, 2.0].
        debug: print diagnostics when True.

    Returns:
        torch.Tensor of shape (B, C, H + 2*pad_h, W + 2*pad_w).
    """
    batch, channels, height, width = input_tensor.shape
    dev = input_tensor.device
    dt = input_tensor.dtype

    # Force every tunable into its documented range.
    gradient_center_x = float(min(1.0, max(0.0, gradient_center_x)))
    gradient_center_y = float(min(1.0, max(0.0, gradient_center_y)))
    gradient_radius = float(min(2.0, max(0.1, gradient_radius)))

    if debug:
        print(f"[Gradient Radial] Center: ({gradient_center_x:.2f}, {gradient_center_y:.2f}), "
              f"Radius: {gradient_radius:.2f}")

    # Canvas size once padding is applied.
    full_h = height + 2 * pad_h
    full_w = width + 2 * pad_w

    # Gradient focus in canvas pixel coordinates.
    focus_y = gradient_center_y * full_h
    focus_x = gradient_center_x * full_w

    # Broadcastable row / column coordinate vectors.
    rows = torch.arange(full_h, device=dev, dtype=dt).view(-1, 1)
    cols = torch.arange(full_w, device=dev, dtype=dt).view(1, -1)

    # Distance from the focus, normalized by half the canvas diagonal.
    half_diag = math.sqrt(full_h**2 + full_w**2) / 2.0
    norm_dist = torch.sqrt((rows - focus_y)**2 + (cols - focus_x)**2) / half_diag

    # Linear falloff clamped into [0, 1], shaped (1, 1, full_h, full_w).
    gradient = torch.clamp(1.0 - (norm_dist / gradient_radius), 0.0, 1.0)
    gradient = gradient.unsqueeze(0).unsqueeze(0)

    # Circular padding of the input before weighting.
    padded = F.pad(input_tensor, (pad_w, pad_w, pad_h, pad_h), mode='circular')

    # Safe expand with a broadcast_to fallback (vUltimate fix).
    try:
        gradient_expanded = gradient.expand(batch, channels, full_h, full_w)
    except RuntimeError as e:
        if debug:
            print(f"⚠️ [Gradient Radial] Expand failed: {e}, using broadcast_to")
        gradient_expanded = torch.broadcast_to(gradient, (batch, channels, full_h, full_w))

    # Apply the gradient: full strength at the focus, fading outward.
    result = padded * gradient_expanded

    if debug:
        print(f"[Gradient Radial] Gradient range: [{gradient.min().item():.3f}, {gradient.max().item():.3f}]")

    return result
|
| 1280 |
+
|
| 1281 |
+
|
| 1282 |
+
# ═══════════════════════════════════════════════════════════════════════════
|
| 1283 |
+
# 3.7. NOISE BLEND (V3.1 - НОВАЯ ФУНКЦИЯ)
|
| 1284 |
+
# ═══════════════════════════════════════════════════════════════════════════
|
| 1285 |
+
|
| 1286 |
+
def apply_noise_blend(input_tensor, pad_h, pad_w, noise_scale=5.0,
                      noise_octaves=2, debug=False):
    """Circularly pad the latent and soften its borders with procedural noise.

    A multi-octave sin/cos field is mixed with a distance-from-content map
    so the attenuation near the padded edges looks organic rather than
    geometric.

    V3.1: new function.
    V3.1.1: float16 fixes — adaptive epsilon for the normalization and a
    simplified blend formula.

    Args:
        input_tensor: latent tensor of shape (B, C, H, W).
        pad_h, pad_w: padding added on each vertical / horizontal side.
        noise_scale: noise frequency scale, clamped into [1.0, 10.0].
        noise_octaves: number of noise octaves, clamped into [1, 4].
        debug: print diagnostics when True.

    Returns:
        torch.Tensor of shape (B, C, H + 2*pad_h, W + 2*pad_w).
    """
    batch, channels, height, width = input_tensor.shape
    dev = input_tensor.device
    dt = input_tensor.dtype

    # Precision-aware epsilon (critical when dt is float16).
    eps = get_adaptive_epsilon(dt)

    # Force the tunables into their supported ranges.
    noise_scale = float(min(10.0, max(1.0, noise_scale)))
    noise_octaves = int(min(4, max(1, noise_octaves)))

    if debug:
        print(f"[Noise Blend] Scale: {noise_scale:.2f}, Octaves: {noise_octaves}, Epsilon: {eps}")

    # Canvas size once padding is applied.
    full_h = height + 2 * pad_h
    full_w = width + 2 * pad_w

    # Broadcastable row / column coordinate vectors.
    rows = torch.arange(full_h, device=dev, dtype=dt).view(-1, 1)
    cols = torch.arange(full_w, device=dev, dtype=dt).view(1, -1)

    # Multi-octave Perlin-style field built from cheap sin/cos products;
    # amplitude halves and frequency doubles on every octave.
    field = torch.zeros(full_h, full_w, device=dev, dtype=dt)
    for octave in range(noise_octaves):
        amplitude = 0.5 ** octave
        frequency = 2.0 ** octave

        phase_x = cols * frequency * noise_scale / full_w * 2 * math.pi
        phase_y = rows * frequency * noise_scale / full_h * 2 * math.pi

        field = field + torch.sin(phase_x + octave) * torch.cos(phase_y + octave * 0.7) * amplitude

    # V3.1.1 fix: normalize into [0, 1] with the adaptive epsilon.
    field = (field - field.min()) / (field.max() - field.min() + eps)

    # Distance-from-content map over the padded canvas.
    content_box = (pad_h, pad_h + height, pad_w, pad_w + width)
    distance = create_distance_map(full_h, full_w, content_box, dev, dt)

    # V3.1.1 fix: explicit squeeze for clarity -> (full_h, full_w).
    distance_2d = distance.squeeze(0).squeeze(0)

    # Mostly distance-driven, with a noisy component strongest at the edges.
    blend_mask = 0.7 * distance_2d + 0.3 * field
    blend_mask = torch.clamp(blend_mask, 0.0, 1.0).unsqueeze(0).unsqueeze(0)

    # Circular padding of the input.
    padded = F.pad(input_tensor, (pad_w, pad_w, pad_h, pad_h), mode='circular')

    # Safe expand with a broadcast_to fallback (vUltimate fix).
    try:
        blend_mask_expanded = blend_mask.expand(batch, channels, full_h, full_w)
    except RuntimeError as e:
        if debug:
            print(f"⚠️ [Noise Blend] Expand failed: {e}, using broadcast_to")
        blend_mask_expanded = torch.broadcast_to(blend_mask, (batch, channels, full_h, full_w))

    # V3.1.1 fix: simplified blend — attenuate by up to 50% where the mask
    # is strongest (equivalent to x*(1-m) + x*m*0.5).
    result = padded * (1.0 - 0.5 * blend_mask_expanded)

    if debug:
        print(f"[Noise Blend] Mask range: [{blend_mask.min().item():.3f}, {blend_mask.max().item():.3f}]")

    return result
|
| 1379 |
+
|
| 1380 |
+
|
| 1381 |
+
# ═══════════════════════════════════════════════════════════════════════════
|
| 1382 |
+
# 4. ГЛАВНАЯ ФУНКЦИЯ (V3.0 - УЛУЧШЕНО)
|
| 1383 |
+
# ════════════════════════════��══════════════════════════════════════════════
|
| 1384 |
+
|
| 1385 |
+
def validate_zoom_params(params):
    """Normalize a raw zoom-parameter dict into a canonical form.

    V3.0+: keeps both the legacy and the new alias keys side by side.

    This matters for the latent/router path:
    - zoom_engine is preserved (it used to be dropped, and the backend
      silently fell back to 'auto')
    - edge_fade <-> zoom_in_fade are kept in sync
    - zoom_fade_to_black/zoom_fade_strength <-> fade_to_black/fade_strength
      are kept in sync
    - both debug_mode and debug are accepted
    """
    # Coerce the mode values into their enums, falling back to the defaults
    # on any bad/unknown input.
    raw_zoom = params.get('zoom_mode', 'outpaint_zoom')
    try:
        zoom_mode = raw_zoom if isinstance(raw_zoom, ZoomMode) else ZoomMode(raw_zoom)
    except Exception:
        zoom_mode = ZoomMode.OUTPAINT_ZOOM

    raw_blend = params.get('blend_mode', 'circular_reflect')
    try:
        blend_mode = raw_blend if isinstance(raw_blend, BlendMode) else BlendMode(raw_blend)
    except Exception:
        blend_mode = BlendMode.CIRCULAR_REFLECT

    # Resolve each aliased flag once; mirrored under every alias below.
    edge_fade = _coerce_bool_param(params.get('edge_fade', params.get('zoom_in_fade', True)), True)
    fade_to_black = _coerce_bool_param(params.get('zoom_fade_to_black', params.get('fade_to_black', False)), False)
    fade_strength = float(params.get('zoom_fade_strength', params.get('fade_strength', 0.3)))
    debug_value = _coerce_bool_param(params.get('debug_mode', params.get('debug', False)), False)

    normalized = {
        'zoom_engine': str(params.get('zoom_engine', 'auto')),
        'zoom_factor': float(params.get('zoom_factor', 0.0)),
        'zoom_mode': zoom_mode,
        'blend_mode': blend_mode,
        'convergence_point': float(params.get('convergence_point', 0.5)),
        'convergence_y': float(params.get('convergence_y', 0.5)),
        'depth_power': float(params.get('depth_power', 1.0)),
        'blend_falloff': str(params.get('blend_falloff', 'smoothstep')),
        'blend_sharpness': float(params.get('blend_sharpness', 1.0)),
        'blend_width': params.get('blend_width', None),
        # pan_x/pan_y also accept the legacy x_pan/y_pan spellings.
        'pan_x': float(params.get('pan_x', params.get('x_pan', 0.0))),
        'pan_y': float(params.get('pan_y', params.get('y_pan', 0.0))),
        'edge_fade': edge_fade,
        'zoom_in_fade': edge_fade,
        'zoom_fade_to_black': fade_to_black,
        'zoom_fade_strength': fade_strength,
        'fade_to_black': fade_to_black,
        'fade_strength': fade_strength,
        'fade_edge_strength': float(params.get('fade_edge_strength', 0.15)),
        'noise_strength': float(params.get('noise_strength', 1.0)),
        'interp_mode': str(params.get('interp_mode', 'bilinear')),
        'variance_correction': _coerce_bool_param(params.get('variance_correction', True), True),
        'auto_clamp_pan': _coerce_bool_param(params.get('auto_clamp_pan', True), True),
        'adaptive_noise_scale': _coerce_bool_param(params.get('adaptive_noise_scale', True), True),
        'debug': debug_value,
        'debug_mode': debug_value,
        'spiral_rotation': float(params.get('spiral_rotation', 0.5)),
        'spiral_direction': float(params.get('spiral_direction', 1.0)),
        'gradient_center_x': float(params.get('gradient_center_x', 0.5)),
        'gradient_center_y': float(params.get('gradient_center_y', 0.5)),
        'gradient_radius': float(params.get('gradient_radius', 1.0)),
        'noise_scale': float(params.get('noise_scale', 5.0)),
        'noise_octaves': int(params.get('noise_octaves', 2)),
    }
    return normalized
|
| 1447 |
+
def apply_unified_zoom(input_tensor, pad_h, pad_w, zoom_factor=0.0,
                       zoom_mode=ZoomMode.OUTPAINT_ZOOM,
                       blend_mode=BlendMode.CIRCULAR_REFLECT,
                       convergence_point=0.5, convergence_y=0.5,
                       depth_power=1.0,
                       blend_falloff='smoothstep', blend_sharpness=1.0,
                       blend_width=None,
                       pan_x=0.0, pan_y=0.0,
                       fade_to_black=False, fade_strength=0.3,
                       fade_edge_strength=0.15,
                       noise_strength=1.0,
                       interp_mode='bilinear',
                       zoom_in_fade=True,
                       variance_correction=True,
                       auto_clamp_pan=True,
                       adaptive_noise_scale=True,
                       debug=False,
                       extra_params=None):
    """Dispatch to the zoom backend selected by ``zoom_mode`` and return a
    padded latent of shape (B, C, H + 2*pad_h, W + 2*pad_w).

    All math runs in float32 regardless of the input dtype; the result is
    cast back to the original dtype at the end, with NaN/Inf cleanup and
    range clamping applied first when that dtype is float16.

    NOTE(review): relies on ZoomMode/BlendMode enums and the apply_* helper
    functions defined elsewhere in this module; HYBRID/BLEND_TRANSITION
    additionally import from improved_tiling_functions at call time.
    """

    # 1. Remember the original dtype (most likely float16).
    original_dtype = input_tensor.dtype

    # 2. FORCE FLOAT32 for all computation.
    #    This prevents the "acid noise" artifacts caused by NaN/Inf in fp16.
    input_tensor = input_tensor.float()

    # Guard against a no-op: with no zoom and no pan, plain circular
    # padding is all that is needed.
    is_active = (abs(zoom_factor) > 0.001) or (abs(pan_x) > 0.001) or (abs(pan_y) > 0.001)

    if not is_active:
        result = F.pad(input_tensor, (pad_w, pad_w, pad_h, pad_h), mode='circular')
        # Return in the caller's original dtype.
        return result.to(dtype=original_dtype)

    if debug:
        print(f"[Unified Zoom] Mode: {zoom_mode}, Dtype safe cast: {original_dtype} -> float32")

    # Holds the mode-specific output; stays None if no branch matches.
    result = None

    # ═══════════════════════════════════════════════════════════════════
    # MODE DISPATCH (direct calls — the helpers live in this same file)
    # ═══════════════════════════════════════════════════════════════════

    if zoom_mode == ZoomMode.OUTPAINT_ZOOM:
        result = apply_outpaint_zoom(
            input_tensor,
            zoom_factor,
            pad_h, pad_w,
            convergence=convergence_point,
            convergence_y=convergence_y,
            fade_strength=fade_strength,
            depth_power=depth_power,
            pan_x=pan_x, pan_y=pan_y,
            fade_to_black=fade_to_black,
            fade_edge_strength=fade_edge_strength,
            # Helpers take the blend mode as a plain string.
            blend_mode=blend_mode.value if isinstance(blend_mode, BlendMode) else str(blend_mode),
            noise_strength=noise_strength,
            interp_mode=interp_mode,
            zoom_in_fade=zoom_in_fade,
            variance_correction=variance_correction,
            auto_clamp_pan=auto_clamp_pan,
            adaptive_noise_scale=adaptive_noise_scale,
            debug=debug,
            extra_params=extra_params
        )

    elif zoom_mode == ZoomMode.GRID_WARP:
        # Warp first, then pad the warped result with zeros.
        x_warped = apply_grid_warp_zoom(
            input_tensor,
            zoom_factor,
            convergence_point,
            depth_power,
            pan_x, pan_y,
            convergence_y,
            interp_mode=interp_mode,
            debug=debug
        )
        result = F.pad(x_warped, (pad_w, pad_w, pad_h, pad_h), mode='constant', value=0)

    elif zoom_mode == ZoomMode.SPIRAL_ZOOM:
        # Spiral parameters default here; extra_params may override them.
        spiral_rotation = 0.5
        spiral_direction = 1.0

        if extra_params:
            spiral_rotation = extra_params.get('spiral_rotation', 0.5)
            spiral_direction = extra_params.get('spiral_direction', 1.0)

        result = apply_spiral_zoom(
            input_tensor,
            zoom_factor,
            pad_h, pad_w,
            spiral_rotation=spiral_rotation,
            spiral_direction=spiral_direction,
            interp_mode=interp_mode,
            debug=debug
        )

    elif zoom_mode in [ZoomMode.CONVERGENCE_SHIFT, ZoomMode.HYBRID, ZoomMode.BLEND_TRANSITION]:
        # All three legacy modes share the same shift step.
        x_shifted = apply_legacy_shift_zoom(
            input_tensor,
            zoom_factor,
            convergence_point,
            depth_power,
            pan_x, pan_y,
            auto_clamp_pan=auto_clamp_pan,
            debug=debug
        )

        if zoom_mode == ZoomMode.CONVERGENCE_SHIFT:
            padded = F.pad(x_shifted, (pad_w, pad_w, pad_h, pad_h), mode='circular')
            if fade_to_black:
                # Import only when needed (compatibility with older setups).
                try:
                    from improved_tiling_functions import compute_blend_fade_to_black
                    padded = compute_blend_fade_to_black(padded, pad_h, pad_w, fade_strength)
                except ImportError:
                    # Best-effort: skip the fade when the module is absent.
                    pass
            result = padded
        else:
            # Fallback path for the remaining modes (HYBRID/BLEND_TRANSITION).
            try:
                from improved_tiling_functions import compute_advanced_blend_padding
                mode_str = blend_mode.value if isinstance(blend_mode, BlendMode) else str(blend_mode)
                # e.g. 'circular_reflect' -> 'circular'.
                mode_adv = mode_str.split('_')[0] if '_' in mode_str else 'circular'

                result = compute_advanced_blend_padding(
                    x_shifted, pad_h, pad_w,
                    mode_simple='replicate',
                    mode_advanced=mode_adv,
                    blend_strength=0.7,
                    blend_width=blend_width,
                    falloff_curve=blend_falloff,
                    edge_sharpness=blend_sharpness,
                    fade_to_black=fade_to_black,
                    fade_strength=fade_strength
                )

                # ═══════════════════════════════════════════════════════════════
                # CRITICAL FIX: protection against float16 overflow
                # ═══════════════════════════════════════════════════════════════
                # This fix removes the "gray noise" artifacts!
                #
                # Problem: compute_advanced_blend_padding can produce values
                # outside the float16 range (-65504 to 65504).
                # During the float32->float16 cast they become Inf/NaN -> noise.
                #
                # Solution: clean and clamp IN FLOAT32 first, then convert
                # to float16 safely.
                # ═══════════════════════════════════════════════════════════════

                if original_dtype == torch.float16:
                    # Step 1: scrub NaN/Inf while still in float32.
                    result = torch.nan_to_num(result, nan=0.0, posinf=65504.0, neginf=-65504.0)

                    # Step 2: clamp into the float16 representable range
                    # BEFORE the dtype conversion.
                    result = torch.clamp(result, min=-65504.0, max=65504.0)

                    if debug:
                        print(f"[Unified Zoom] Float16 safety: clamped to [-65504, 65504]")

            except ImportError:
                # Module unavailable: plain circular padding instead.
                result = F.pad(x_shifted, (pad_w, pad_w, pad_h, pad_h), mode='circular')

    # Fallback when no mode matched.
    if result is None:
        result = F.pad(input_tensor, (pad_w, pad_w, pad_h, pad_h), mode='circular')

    # ═══════════════════════════════════════════════════════════════════
    # 3. SAFE CONVERSION BACK TO THE ORIGINAL DTYPE
    # ═══════════════════════════════════════════════════════════════════
    # Once dangerous values are scrubbed and clamped, the cast back to
    # float16 is safe.
    # ═══════════════════════════════════════════════════════════════════

    if torch.is_tensor(result) and result.dtype != original_dtype:
        # Covers the modes that did not already apply the float16 guard
        # (everything except the HYBRID/BLEND_TRANSITION branch above).
        if original_dtype == torch.float16:
            # Final scrub before conversion, just in case.
            result = torch.nan_to_num(result, nan=0.0, posinf=65504.0, neginf=-65504.0)
            result = torch.clamp(result, min=-65504.0, max=65504.0)

        # Now the conversion is safe.
        result = result.to(dtype=original_dtype)

    return result
|
asds/libs/improved_tiling_functions.py
ADDED
|
@@ -0,0 +1,644 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn.functional as F
|
| 3 |
+
import math
|
| 4 |
+
import numpy as np
|
| 5 |
+
from collections import OrderedDict
|
| 6 |
+
|
| 7 |
+
# =======================================================================
|
| 8 |
+
# vUltimate - REAL Deep Code Audit
|
| 9 |
+
# Based on v13 (THE_last_version) with CRITICAL FIXES
|
| 10 |
+
# =======================================================================
|
| 11 |
+
|
| 12 |
+
# =======================================================================
|
| 13 |
+
# BOOL NORMALIZATION HELPERS
|
| 14 |
+
# =======================================================================
|
| 15 |
+
|
| 16 |
+
def _coerce_bool_param(value, default=False):
|
| 17 |
+
"""Robust bool parsing for UI values, presets and metadata strings."""
|
| 18 |
+
if value is None:
|
| 19 |
+
return bool(default)
|
| 20 |
+
if isinstance(value, bool):
|
| 21 |
+
return value
|
| 22 |
+
if isinstance(value, (int, float)):
|
| 23 |
+
return value != 0
|
| 24 |
+
if isinstance(value, str):
|
| 25 |
+
s = value.strip().lower()
|
| 26 |
+
if s in {'1', 'true', 'yes', 'y', 'on'}:
|
| 27 |
+
return True
|
| 28 |
+
if s in {'0', 'false', 'no', 'n', 'off', 'none', 'null', ''}:
|
| 29 |
+
return False
|
| 30 |
+
return bool(value)
|
| 31 |
+
|
| 32 |
+
# =======================================================================
|
| 33 |
+
# 1. SMART CACHING (Speed optimization ~15-20%)
|
| 34 |
+
# =======================================================================
|
| 35 |
+
class SmartMaskCache:
    """Small LRU cache that keeps recently used blend masks around."""

    def __init__(self, max_size=50):
        # OrderedDict insertion order doubles as recency order:
        # oldest entry first, most recently used entry last.
        self.cache = OrderedDict()
        self.max_size = max_size

    def get(self, key):
        """Return the cached value (marking it most-recently-used) or None."""
        if key not in self.cache:
            return None
        self.cache.move_to_end(key)
        return self.cache[key]

    def set(self, key, value):
        """Insert or refresh an entry, evicting the LRU one when over capacity."""
        if key in self.cache:
            self.cache.move_to_end(key)
        self.cache[key] = value
        while len(self.cache) > self.max_size:
            # last=False pops the oldest (least recently used) entry.
            self.cache.popitem(last=False)


# Shared module-level cache instance.
_MASK_CACHE = SmartMaskCache()
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
# =======================================================================
|
| 59 |
+
# 2. SAFE EPSILON (🔴 CRITICAL FIX: Infinite recursion bug in v13!)
|
| 60 |
+
# =======================================================================
|
| 61 |
+
def get_safe_epsilon(tensor_or_dtype):
    """Return a division-safe epsilon matched to a tensor's precision.

    Float16's smallest normal value is ~6e-5, so the usual 1e-6 underflows
    to zero in half precision and stops working as a denominator guard.

    (vUltimate fix: the v13 version recursed into itself for float32.)

    Args:
        tensor_or_dtype: a torch.Tensor or a torch.dtype.

    Returns:
        float: epsilon safe for the given dtype.
    """
    dtype = tensor_or_dtype.dtype if isinstance(tensor_or_dtype, torch.Tensor) else tensor_or_dtype

    if dtype in (torch.float16, torch.bfloat16):
        return 1e-3  # large enough to survive half precision
    if dtype == torch.float32:
        return 1e-6  # direct value — no recursion
    return 1e-12  # float64 / higher precision
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
# =======================================================================
|
| 89 |
+
# 3. LATENT COLOR FIX (Variance-preserving blend)
|
| 90 |
+
# =======================================================================
|
| 91 |
+
def blend_with_variance_fix(a, b, mask):
    """Variance-preserving blend of two latent-noise layers.

    A plain linear mix of independent noise fields lowers the variance
    wherever the mask is fractional; dividing by
    sqrt(mask^2 + (1-mask)^2) restores it. The epsilon inside the sqrt is
    dtype-adaptive so the guard survives float16.

    Args:
        a: primary layer (active where mask=1) -> advanced/circular padding.
        b: background layer (active where mask=0) -> simple/replicate padding.
        mask: blend weights in [0, 1].

    Mask semantics (v11+ convention): 1.0 on EDGES (where advanced padding
    is needed), 0.0 in the CENTER (content or simple padding).
    """
    # Straight linear interpolation between the two layers.
    mixed = a * mask + b * (1 - mask)

    # Restore unit variance; adaptive epsilon keeps sqrt away from zero.
    eps_val = get_safe_epsilon(mask.dtype)
    scale = torch.sqrt(mask**2 + (1 - mask)**2 + eps_val)

    return mixed / scale
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
# =======================================================================
|
| 116 |
+
# 4. LEGACY FADE TO BLACK (For ZOOM effect) ✅
|
| 117 |
+
# =======================================================================
|
| 118 |
+
def compute_blend_fade_to_black(padded, pad_h, pad_w, fade_strength=0.1):
    """Darken the borders of an already padded latent (legacy zoom vignette).

    ⚡ LEGACY MODE for the zoom effect (V3.5 logic from v11-v13) ⚡

    The linear gradient covers the WHOLE padding plus a slice of content,
    producing a vignette that ramps from 0 (edge) to 1 (center). Note this
    is v13 behavior: v7 faded only content inside the padding zones, while
    v13 fades the entire image including the padding.

    Args:
        padded: already padded tensor [B, C, H, W].
        pad_h: vertical padding size.
        pad_w: horizontal padding size.
        fade_strength: fade depth into the content (0.0-1.0), typically
            0.05-0.2.

    Returns:
        Tensor with darkened edges (same shape as ``padded``).
    """
    _, _, full_h, full_w = padded.shape

    # Content size without the padding (never negative).
    inner_h = max(full_h - 2 * pad_h, 0)
    inner_w = max(full_w - 2 * pad_w, 0)

    # Total fade zone = padding + a fade_strength slice of the content.
    ramp_h = pad_h + int(inner_h * fade_strength)
    ramp_w = pad_w + int(inner_w * fade_strength)

    out = padded.clone()

    # ── vertical edges ────────────────────────────────────────────────
    if ramp_h > 0:
        # 0 -> 1 gradient along the row axis, broadcast over B/C/W.
        ramp = torch.linspace(0, 1, steps=ramp_h,
                              device=padded.device, dtype=padded.dtype).view(1, 1, -1, 1)
        span = min(ramp_h, full_h)

        # Top fade, then bottom fade using the flipped gradient.
        out[:, :, :span, :] *= ramp[:, :, :span, :]
        out[:, :, -span:, :] *= ramp[:, :, :span, :].flip(2)

    # ── horizontal edges ──────────────────────────────────────────────
    if ramp_w > 0:
        # 0 -> 1 gradient along the column axis, broadcast over B/C/H.
        ramp = torch.linspace(0, 1, steps=ramp_w,
                              device=padded.device, dtype=padded.dtype).view(1, 1, 1, -1)
        span = min(ramp_w, full_w)

        # Left fade, then right fade using the flipped gradient.
        out[:, :, :, :span] *= ramp[:, :, :, :span]
        out[:, :, :, -span:] *= ramp[:, :, :, :span].flip(3)

    return out
|
| 187 |
+
|
| 188 |
+
|
| 189 |
+
# =======================================================================
|
| 190 |
+
# 5. ADVANCED BLEND MASK (For modern tiling mode)
|
| 191 |
+
# =======================================================================
|
| 192 |
+
def create_advanced_blend_mask(h, w, blend_width, device, dtype=torch.float32,
                               falloff_curve="smoothstep", edge_sharpness=1.0):
    """
    Creates cached edge blend mask.

    🔴 MASK SEMANTICS (v11+ convention):
        1.0 = on the very EDGE (where Advanced Padding is needed)
        0.0 = in CENTER (where content or Simple Padding is)

    Args:
        h, w: Mask dimensions
        blend_width: Transition zone width (pixels)
        device: Torch device
        dtype: Data type
        falloff_curve: Curve type ('linear', 'smoothstep', 'cosine')
        edge_sharpness: Edge sharpness (1.0 = normal, >1 = sharper, <1 = softer)

    Returns:
        Mask of size [1, 1, h, w]
    """
    if blend_width <= 0:
        # Defensive behaviour for invalid/manual callers: no transition zone.
        # Normal UI flow uses 0=None(auto) before reaching this helper. Returning
        # zeros is much safer than blending the entire frame as 'advanced'.
        return torch.zeros(1, 1, h, w, device=device, dtype=dtype)

    # BUG FIX 6a: normalise falloff_curve to a known value; warn loudly if
    # the UI has sent something the backend doesn't actually implement.
    _KNOWN_FALLOFFS = {'linear', 'smoothstep', 'cosine'}
    if falloff_curve not in _KNOWN_FALLOFFS:
        print(f"[AdvancedBlend] Warning: unsupported falloff_curve '{falloff_curve}' "
              f"— falling back to 'smoothstep'. Supported: {sorted(_KNOWN_FALLOFFS)}")
        falloff_curve = 'smoothstep'

    # Transition can never be wider than half the frame on either axis.
    half_w = min(blend_width, w // 2)
    half_h = min(blend_width, h // 2)

    mask = torch.zeros((1, 1, h, w), device=device, dtype=dtype)

    def _gradient(n):
        """0→1 gradient of length n, shaped by the configured curve.

        BUG FIX: for n == 1, linspace(0, 1, 1) yields [0], i.e. a zero mask.
        For n <= 1 we now return ones so the boundary pixel gets full weight.
        """
        if n <= 1:
            return torch.ones(max(n, 1), device=device, dtype=dtype)
        t = torch.linspace(0, 1, steps=n, device=device, dtype=dtype)
        if edge_sharpness != 1.0:
            t = torch.pow(t, edge_sharpness)

        if falloff_curve == 'cosine':
            return (1 - torch.cos(t * math.pi)) / 2
        if falloff_curve == 'smoothstep':
            return t * t * (3 - 2 * t)
        # 'linear' (and any residual fallback) is the raw ramp.
        return t

    # Paint the four edge bands; overlapping corners keep the larger value.
    if half_w > 0:
        grad = _gradient(half_w)
        inward = grad.flip(0).view(1, 1, 1, -1)   # 1.0 at the edge, falling inward
        outward = grad.view(1, 1, 1, -1)          # rising toward the right edge
        mask[:, :, :, :half_w] = torch.maximum(mask[:, :, :, :half_w], inward)
        mask[:, :, :, -half_w:] = torch.maximum(mask[:, :, :, -half_w:], outward)

    if half_h > 0:
        grad = _gradient(half_h)
        mask[:, :, :half_h, :] = torch.maximum(mask[:, :, :half_h, :],
                                               grad.flip(0).view(1, 1, -1, 1))
        mask[:, :, -half_h:, :] = torch.maximum(mask[:, :, -half_h:, :],
                                                grad.view(1, 1, -1, 1))

    return mask
|
| 270 |
+
|
| 271 |
+
|
| 272 |
+
# =======================================================================
|
| 273 |
+
# 6. IMPROVED BLEND PADDING (Main tiling function)
|
| 274 |
+
# =======================================================================
|
| 275 |
+
def compute_advanced_blend_padding(input_tensor, pad_h, pad_w,
                                   mode_simple='replicate',
                                   mode_advanced='circular',
                                   blend_strength=0.5,
                                   blend_width=None,
                                   falloff_curve='smoothstep',
                                   edge_sharpness=1.0,
                                   fade_to_black=False,
                                   fade_strength=0.1):
    """
    IMPROVED PADDING MODE

    Two operation modes:

    1. FADE TO BLACK (fade_to_black=True) - for Zoom effect:
       - Applies one padding (mode_advanced)
       - DARKENS edges, creating zoom out effect
       - Uses legacy compute_blend_fade_to_black function

    2. BLEND TWO PADDINGS (fade_to_black=False) - for quality edges:
       - Creates two different paddings (simple and advanced)
       - Blends them via mask
       - Applies variance fix for color correction
       - Does NOT create zoom effect

    Args:
        input_tensor: Original tensor WITHOUT padding [B, C, H, W]
        pad_h, pad_w: Padding sizes
        mode_simple: Mode for "simple" padding ('replicate', 'constant')
        mode_advanced: Mode for "advanced" padding ('circular', 'reflect'),
            or a pre-computed padded tensor to use directly
        blend_strength: Blend strength (0.0-1.0)
        blend_width: Transition width (None = auto)
        falloff_curve: Gradient curve type
        edge_sharpness: Edge sharpness
        fade_to_black: If True, uses legacy darkening mode
        fade_strength: Darkening strength for fade_to_black mode

    Returns:
        Padded tensor [B, C, H+2*pad_h, W+2*pad_w]
    """
    pad_spec = (pad_w, pad_w, pad_h, pad_h)

    def _pad_simple(mode):
        # 'constant' needs an explicit fill value (black).
        if mode == 'constant':
            return F.pad(input_tensor, pad_spec, mode='constant', value=0)
        return F.pad(input_tensor, pad_spec, mode=mode)

    def _pad_advanced(mode):
        # BUG FIX 4 (shared): 'reflect' only supports padding strictly smaller
        # than the source dimension; fall back to 'replicate' otherwise instead
        # of crashing. Any other string mode is passed straight through.
        if mode == 'reflect':
            _, _, src_h, src_w = input_tensor.shape
            if pad_w < src_w and pad_h < src_h:
                return F.pad(input_tensor, pad_spec, mode='reflect')
            return F.pad(input_tensor, pad_spec, mode='replicate')
        return F.pad(input_tensor, pad_spec, mode=mode)

    # ═══════════════════════════════════════════════════════════════════
    # MODE 1: FADE TO BLACK (for Zoom)
    # ═══════════════════════════════════════════════════════════════════
    if fade_to_black:
        if isinstance(mode_advanced, str):
            padded = _pad_advanced(mode_advanced)
        else:
            padded = mode_advanced  # Pre-computed tensor

        # Darken edges (now works correctly!)
        return compute_blend_fade_to_black(padded, pad_h, pad_w, fade_strength)

    # ═══════════════════════════════════════════════════════════════════
    # MODE 2: BLEND TWO PADDINGS (Tiling)
    # ═══════════════════════════════════════════════════════════════════

    # If disabled: pure simple padding.
    if blend_strength <= 0.001:
        return _pad_simple(mode_simple)

    # If 100% strength: pure advanced padding.
    if blend_strength >= 0.999:
        if isinstance(mode_advanced, str):
            return _pad_advanced(mode_advanced)
        return mode_advanced  # Pre-computed tensor

    # 1. Prepare layers
    simple = _pad_simple(mode_simple)

    if isinstance(mode_advanced, str):
        # CONSISTENCY FIX: this branch used to hard-code mode='circular' for
        # every non-'reflect' string, silently overriding the caller's choice.
        # It now honours mode_advanced like every other branch above
        # (identical behaviour for the documented 'circular'/'reflect' values).
        advanced = _pad_advanced(mode_advanced)
    else:
        advanced = mode_advanced  # Pre-computed

    # 2. Get mask from cache
    if blend_width is None:
        blend_width = max(pad_h, pad_w)

    b, c, h, w = input_tensor.shape
    device = input_tensor.device
    dtype = input_tensor.dtype

    # 🔴 vUltimate Fix: Enhanced cache key WITH dtype (v11+ feature)
    cache_key = (h, w, pad_h, pad_w, blend_width, falloff_curve, edge_sharpness,
                 str(device), str(dtype))
    mask = _MASK_CACHE.get(cache_key)

    if mask is None:
        H_pad, W_pad = simple.shape[2:]
        mask = create_advanced_blend_mask(H_pad, W_pad, blend_width, device,
                                          dtype, falloff_curve, edge_sharpness)
        _MASK_CACHE.set(cache_key, mask)

    # 3. Blend with variance fix
    final_mask = mask * blend_strength

    # 🔴 vUltimate: v11+ semantics: advanced (mask=1 on edges) / simple (mask=0 in center)
    return blend_with_variance_fix(advanced, simple, final_mask)
|
| 400 |
+
|
| 401 |
+
|
| 402 |
+
# =======================================================================
|
| 403 |
+
# 7. MULTI-RESOLUTION (Temporal strategy)
|
| 404 |
+
# =======================================================================
|
| 405 |
+
class BlendStrategy:
    """Names of the interpolation curves available for multi-resolution
    temporal transitions."""

    LINEAR = "linear"            # constant-rate ramp
    COSINE = "cosine"            # ease-in/ease-out half-cosine
    EXPONENTIAL = "exponential"  # slow start, fast finish (t^2)
    SIGMOID = "sigmoid"          # S-curve centred at t = 0.5
|
| 411 |
+
|
| 412 |
+
class MultiResStrategy:
    """Maps linear progress onto a temporal blending curve for progressive
    detail addition across denoising steps."""

    def __init__(self, strategy_type=BlendStrategy.COSINE):
        self.strategy_type = strategy_type

    def get_factor(self, progress, sharpness=1.0):
        """Return the blend factor for *progress*, clamped to [0.0, 1.0].

        sharpness != 1.0 warps progress via pow() before the curve is applied.
        """
        t = min(max(progress, 0.0), 1.0)
        if sharpness != 1.0:
            t = math.pow(t, sharpness)

        curve = self.strategy_type
        if curve == BlendStrategy.COSINE:
            return (1.0 - math.cos(t * math.pi)) / 2.0
        if curve == BlendStrategy.EXPONENTIAL:
            return math.pow(t, 2)
        if curve == BlendStrategy.SIGMOID:
            # Exact 0/1 at the extremes; exp() never over/underflows here.
            if t <= 0:
                return 0.0
            if t >= 1:
                return 1.0
            return 1.0 / (1.0 + math.exp(-12.0 * (t - 0.5)))
        # LINEAR — and any unrecognised value — is the identity curve.
        return t
|
| 434 |
+
|
| 435 |
+
def apply_multires_blend(tensor_simple, tensor_advanced, current_step,
                         start_step, end_step,
                         strategy="cosine",
                         transition_start=0.0,
                         transition_end=0.3,
                         sharpness=1.0,
                         enabled=False):
    """
    Progressive blending from simple to advanced over denoising steps.

    Args:
        tensor_simple: Low-detail padding result
        tensor_advanced: High-detail padding result
        current_step: Current denoising step
        start_step, end_step: Denoising range
        strategy: Interpolation curve type
        transition_start, transition_end: Transition window (0.0-1.0)
        sharpness: Curve adjustment
        enabled: Master switch

    Returns:
        Blended tensor
    """
    if not enabled:
        return tensor_advanced

    # BUG FIX 6b: normalise strategy to a supported value before use.
    _STRATEGY_ALIASES = {
        'linear': BlendStrategy.LINEAR,
        'cosine': BlendStrategy.COSINE,
        'exponential': BlendStrategy.EXPONENTIAL,
        'sigmoid': BlendStrategy.SIGMOID,
    }
    _KNOWN_STRATEGIES = set(_STRATEGY_ALIASES.values())

    if isinstance(strategy, str):
        alias = strategy.lower()
        if alias in _STRATEGY_ALIASES:
            strategy = _STRATEGY_ALIASES[alias]
        else:
            print(f"[MultiRes] Warning: unsupported strategy '{strategy}' "
                  f"— falling back to 'cosine'. "
                  f"Supported: {sorted(_STRATEGY_ALIASES.keys())}")
            strategy = BlendStrategy.COSINE
    elif strategy not in _KNOWN_STRATEGIES:
        print(f"[MultiRes] Warning: unknown strategy {strategy!r} — falling back to cosine")
        strategy = BlendStrategy.COSINE

    span = end_step - start_step
    if span <= 0:
        return tensor_advanced

    # Where are we inside the denoising range, as a clamped fraction?
    frac = min(max((current_step - start_step) / span, 0.0), 1.0)

    # Map that fraction onto the [transition_start, transition_end] window.
    if frac < transition_start:
        progress = 0.0
    elif frac > transition_end:
        progress = 1.0
    else:
        window = transition_end - transition_start
        progress = 1.0 if window <= 0 else (frac - transition_start) / window

    alpha = MultiResStrategy(strategy).get_factor(progress, sharpness)

    # Snap to the pure tensors at the extremes to skip the lerp entirely.
    if alpha <= 0.001:
        return tensor_simple
    if alpha >= 0.999:
        return tensor_advanced

    # Standard lerp (temporal blend, not spatial)
    return tensor_simple * (1.0 - alpha) + tensor_advanced * alpha
|
| 511 |
+
|
| 512 |
+
|
| 513 |
+
# =======================================================================
|
| 514 |
+
# 8. HELPER FUNCTIONS (From v13 for compatibility)
|
| 515 |
+
# =======================================================================
|
| 516 |
+
|
| 517 |
+
def create_circular_mask(h, w, center_x=0.5, center_y=0.5, radius=0.5,
                         device='cpu', dtype=torch.float32):
    """
    Creates circular mask (white circle on black background), [1, 1, h, w].
    ✅ FLOAT16 FIX: epsilon added inside sqrt so it never evaluates sqrt(0).

    NOTE: This is v13 version (radial distance mask).
    Different from v1/exp which don't have this function.
    """
    eps_val = get_safe_epsilon(dtype)

    # Coordinate grid spanning [-1, 1] on both axes.
    ys, xs = torch.meshgrid(
        torch.linspace(-1, 1, h, device=device, dtype=dtype),
        torch.linspace(-1, 1, w, device=device, dtype=dtype),
        indexing='ij'
    )

    # Re-centre the grid on (center_x, center_y), which are given in [0, 1].
    xs = xs - (center_x - 0.5) * 2
    ys = ys - (center_y - 0.5) * 2

    # Radial distance from the centre, epsilon-guarded for float16.
    dist = torch.sqrt(xs * xs + ys * ys + eps_val)

    # Soft edge: fully white inside radius-0.1, fading to black over 0.2.
    mask = 1.0 - torch.clamp((dist - (radius - 0.1)) / 0.2, 0, 1)

    # Promote to [1, 1, h, w].
    if mask.dim() == 2:
        mask = mask.unsqueeze(0).unsqueeze(0)

    return mask
|
| 550 |
+
|
| 551 |
+
def create_fade_to_black_mask(h, w, strength=0.1, device='cpu', dtype=torch.float32):
    """
    Creates vignette (darkening towards edges), [1, 1, h, w]:
    1.0 in the centre, fading to 0.0 over the outer `strength` band.
    ✅ FLOAT16 FIX: Safe sqrt

    NOTE: This is v13 version (radial vignette).
    Different from v1/exp which don't have this function.

    Args:
        h, w: Mask dimensions
        strength: Fraction of the normalised radius taken by the fade band
        device: Torch device
        dtype: Data type
    """
    # BUG FIX: strength <= 0 used to divide by zero below ((dist - 1.0) / 0),
    # producing inf everywhere and NaN exactly on the threshold ring (0/0,
    # which torch.clamp propagates). Zero strength means "no vignette", so
    # return an all-ones mask instead.
    if strength <= 0:
        return torch.ones(1, 1, h, w, device=device, dtype=dtype)

    eps_val = get_safe_epsilon(dtype)

    y, x = torch.meshgrid(
        torch.linspace(-1, 1, h, device=device, dtype=dtype),
        torch.linspace(-1, 1, w, device=device, dtype=dtype),
        indexing='ij'
    )

    # sqrt with protection
    dist = torch.sqrt(x*x + y*y + eps_val)

    # Normalize so corners are 1.0 (max distance ~1.41)
    dist = dist / 1.4142

    # Invert: center white (1), edges black (0)
    threshold = 1.0 - strength
    mask = 1.0 - torch.clamp((dist - threshold) / strength, 0, 1)

    if len(mask.shape) == 2:
        mask = mask.unsqueeze(0).unsqueeze(0)

    return mask
|
| 581 |
+
|
| 582 |
+
|
| 583 |
+
# =======================================================================
|
| 584 |
+
# 9. PARAMETER VALIDATION HELPERS
|
| 585 |
+
# =======================================================================
|
| 586 |
+
|
| 587 |
+
def validate_blend_params(params):
    """Extract and validate blend parameters from dict.
    Unsupported falloff values are normalised here with a warning so callers
    never silently receive a mode the backend cannot honour.
    Supported: 'linear', 'smoothstep', 'cosine'
    """
    _SUPPORTED_FALLOFFS = {'linear', 'smoothstep', 'cosine'}
    falloff = params.get('blend_falloff', 'smoothstep')
    if falloff not in _SUPPORTED_FALLOFFS:
        print(f"[validate_blend_params] Warning: unsupported blend_falloff '{falloff}' "
              f"— falling back to 'smoothstep'. Supported: {sorted(_SUPPORTED_FALLOFFS)}")
        falloff = 'smoothstep'

    # UI contract: blend_width <= 0 (or absent) means auto → None. Keep
    # helper-level validation aligned with the main script and avoid
    # surprising low-level whole-frame blending.
    raw_width = params.get('blend_width', None)
    width = None
    if raw_width is not None:
        width = int(raw_width)
        if width <= 0:
            width = None

    return {
        'strength': float(params.get('blend_strength', 0.5)),
        'width': width,
        'falloff': falloff,
        'sharpness': float(params.get('blend_sharpness', 1.0)),
        'fade_to_black': _coerce_bool_param(params.get('blend_fade_to_black', False), False),
        'fade_strength': float(params.get('blend_fade_strength', 0.1)),
    }
|
| 615 |
+
|
| 616 |
+
def validate_multires_params(params):
    """Extract and validate multi-resolution parameters from dict.
    Unsupported strategy values are normalised here with a warning.
    Supported: 'linear', 'cosine', 'exponential', 'sigmoid'
    """
    _SUPPORTED_STRATEGIES = {'linear', 'cosine', 'exponential', 'sigmoid'}
    strategy = params.get('multires_strategy', 'cosine')
    if strategy not in _SUPPORTED_STRATEGIES:
        print(f"[validate_multires_params] Warning: unsupported multires_strategy '{strategy}' "
              f"— falling back to 'cosine'. Supported: {sorted(_SUPPORTED_STRATEGIES)}")
        strategy = 'cosine'

    return {
        'strategy': strategy,
        'transition_start': float(params.get('multires_start', 0.0)),
        'transition_end': float(params.get('multires_end', 0.3)),
        'sharpness': float(params.get('multires_sharpness', 1.0)),
    }
|
| 634 |
+
|
| 635 |
+
# =======================================================================
|
| 636 |
+
# vUltimate - End of File
|
| 637 |
+
# CRITICAL FIXES APPLIED:
|
| 638 |
+
# ✅ Fix 1: Infinite recursion in get_safe_epsilon (v13 bug at line 51/65)
|
| 639 |
+
# ✅ Fix 2: Correct v11+ mask semantics (advanced first, mask=1.0 on edges)
|
| 640 |
+
# ✅ Fix 3: Cache key includes dtype (v11+ improvement)
|
| 641 |
+
# ✅ Fix 4: Safe reflect with size validation
|
| 642 |
+
# ✅ Fix 5: v13 fade_to_black logic (not v7 logic)
|
| 643 |
+
# ✅ Fix 6: v13 circular/vignette masks (not v1/exp)
|
| 644 |
+
# =======================================================================
|
asds/scripts/asymmetric_tiling_UNIFIED (69).py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|