import torch


class Canvas_Crop:
    """Reduce the canvas of an image by cropping pixels from each side.

    Inputs
    ------
    image : IMAGE
        Torch tensor [B, H, W, C], C=3 (RGB) or C=4 (RGBA), values in [0, 1].
        If RGB is provided, it will be converted to RGBA by appending an
        opaque alpha channel.
    left, right, up, down : INT
        Number of pixels to remove from the corresponding side.

    Output
    ------
    image : IMAGE
        Torch tensor [B, H - (up+down), W - (left+right), 4] (RGBA).
    """

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "image": ("IMAGE",),
                "left": ("INT", {"default": 0, "min": 0, "max": 16384, "step": 1}),
                "right": ("INT", {"default": 0, "min": 0, "max": 16384, "step": 1}),
                "up": ("INT", {"default": 0, "min": 0, "max": 16384, "step": 1}),
                "down": ("INT", {"default": 0, "min": 0, "max": 16384, "step": 1}),
            }
        }

    RETURN_TYPES = ("IMAGE",)
    RETURN_NAMES = ("image",)
    FUNCTION = "crop"
    CATEGORY = "image/canvas"

    def _ensure_rgba(self, img: torch.Tensor) -> torch.Tensor:
        """Return *img* as [B, H, W, 4], appending an opaque alpha channel to RGB input.

        Tensors with more than 4 channels are truncated to their first 4 (RGBA).

        Raises
        ------
        ValueError
            If the tensor is not 4-D, or has fewer than 3 channels.
        """
        # Expect [B, H, W, C]
        if img.dim() != 4:
            raise ValueError(f"Expected image tensor [B, H, W, C], got shape {tuple(img.shape)}")
        c = img.shape[-1]
        if c == 4:
            return img
        elif c == 3:
            # Add opaque alpha if only RGB provided
            alpha = torch.ones((*img.shape[:-1], 1), dtype=img.dtype, device=img.device)
            return torch.cat([img, alpha], dim=-1)
        elif c > 4:
            # If extra channels exist, keep the first 4 (RGBA)
            return img[..., :4]
        else:
            raise ValueError("Expected RGB or RGBA image with 3 or 4 channels.")

    def crop(self, image: torch.Tensor, left: int, right: int, up: int, down: int):
        """Crop the given number of pixels from each side of the image batch.

        Returns a one-tuple containing the cropped RGBA tensor
        [B, H - (up+down), W - (left+right), 4], as ComfyUI expects node
        outputs as tuples.

        Raises
        ------
        ValueError
            If the requested crop would leave no pixels in either dimension.
        """
        rgba = self._ensure_rgba(image)  # [B, H, W, 4]
        _, h, w, _ = rgba.shape

        # Sanitize: coerce to int and clamp negative amounts to zero.
        left = max(0, int(left))
        right = max(0, int(right))
        up = max(0, int(up))
        down = max(0, int(down))

        # Validate that at least one pixel remains in each dimension.
        if left + right >= w:
            raise ValueError(
                f"Crop too wide: left({left}) + right({right}) >= image width({w})."
            )
        if up + down >= h:
            raise ValueError(
                f"Crop too tall: up({up}) + down({down}) >= image height({h})."
            )

        # Compute crop window
        y0 = up
        y1 = h - down
        x0 = left
        x1 = w - right

        # Perform crop
        out = rgba[:, y0:y1, x0:x1, :]
        return (out,)


# Required mappings for ComfyUI to discover the node
NODE_CLASS_MAPPINGS = {
    "Canvas_Crop": Canvas_Crop
}

NODE_DISPLAY_NAME_MAPPINGS = {
    "Canvas_Crop": "Canvas_Crop (Salia)",
}