| """ |
| Collection of various utils |
| """ |
|
|
| import numpy as np |
|
|
| import imageio.v3 as iio |
| from PIL import Image |
| |
| Image.MAX_IMAGE_PIXELS = 933120000 |
|
|
| import matplotlib.pyplot as plt |
| import matplotlib.patches as patches |
| from matplotlib.lines import Line2D |
| import logging |
| import math |
|
|
|
|
| |
| |
| |
def load_image(filename : str) -> np.ndarray :
    """Load an SEM image from disk.

    Args:
        filename (str): full path and name of the image file to be loaded.

    Returns:
        np.ndarray: the decoded image as a float ('F' mode) array.
    """
    # mode='F' asks imageio for a single-channel floating-point decode.
    return iio.imread(filename, mode='F')
|
|
|
|
| |
| |
| |
def show_boxes(image : np.ndarray, damage_sites : dict, box_size=(250, 250),
               save_image = False, image_path : str = None) :
    """
    Renders an SEM image with colored boxes around identified damage sites.

    Args:
        image (np.ndarray or PIL.Image.Image): SEM image to be shown.
        damage_sites (dict): maps centroid coordinates to class labels.
            NOTE(review): the drawing code uses key[0] as the row (y) and
            key[1] as the column (x), so keys are treated as (y, x) pairs.
        box_size (sequence, optional): [height, width] of the rectangle drawn
            around each centroid. Defaults to (250, 250).
        save_image (bool, optional): save the annotated figure or not.
            Defaults to False.
        image_path (str, optional): full path and name of the output file;
            only used when save_image is True.

    Returns:
        np.ndarray: uint8 RGB rendering of the annotated figure, shape (H, W, 3).
    """
    logging.info(f"show_boxes: Input image type: {type(image)}")

    # Normalize the input to a 2-D grayscale array suitable for imshow.
    if isinstance(image, Image.Image):
        image_to_plot = np.array(image.convert('L'))
        logging.info("show_boxes: Converted PIL Image to grayscale NumPy array for plotting.")
    elif isinstance(image, np.ndarray):
        if image.ndim == 3 and image.shape[2] in [3, 4]:
            # Collapse RGB(A) channels to grayscale by averaging.
            image_to_plot = np.mean(image, axis=2).astype(image.dtype)
            logging.info("show_boxes: Converted multi-channel NumPy array to grayscale for plotting.")
        else:
            image_to_plot = image
            logging.info("show_boxes: Image is already a grayscale NumPy array.")
    else:
        # Best-effort: plot a black placeholder instead of crashing.
        logging.error("show_boxes: Unsupported image format received.")
        image_to_plot = np.zeros((100, 100), dtype=np.uint8)

    _, ax = plt.subplots(1)
    ax.imshow(image_to_plot, cmap='gray')
    ax.set_xticks([])
    ax.set_yticks([])

    # Per-class box colors; unknown labels fall back to black ('k').
    # Hoisted out of the loop: the mapping is loop-invariant.
    label_colors = {
        'Inclusion': 'b',
        'Interface': 'g',
        'Martensite': 'r',
        'Notch': 'y',
        'Shadowing': 'm',
        'Not Classified': 'k'
    }

    # box_size is [height, width]; precompute half-extents once.
    half_box_w = box_size[1] / 2.0
    half_box_h = box_size[0] / 2.0

    for key, label in damage_sites.items():
        edgecolor = label_colors.get(label, 'k')
        # Center the rectangle on (row, col) = (key[0], key[1]).
        rect = patches.Rectangle((key[1] - half_box_w, key[0] - half_box_h),
                                 box_size[1], box_size[0],
                                 linewidth=1, edgecolor=edgecolor, facecolor='none')
        ax.add_patch(rect)

    # Legend derived from label_colors so labels and colors cannot drift
    # apart (fixed: legend previously said 'Shadow' for the 'Shadowing' class).
    legend_elements = [
        Line2D([0], [0], color=color, lw=4, label=name)
        for name, color in label_colors.items()
    ]
    ax.legend(handles=legend_elements, bbox_to_anchor=(1.04, 1), loc="upper left")

    fig = ax.figure
    fig.tight_layout(pad=0)

    if save_image and image_path:
        fig.savefig(image_path, dpi=1200, bbox_inches='tight')

    # Rasterize the figure to an RGB array so callers get the annotated
    # image back even though nothing is shown interactively.
    canvas = fig.canvas
    canvas.draw()
    data = np.frombuffer(canvas.buffer_rgba(), dtype=np.uint8).reshape(
        canvas.get_width_height()[::-1] + (4,))
    data = data[:, :, :3]  # drop the alpha channel

    plt.close(fig)

    return data
|
|
|
|
| |
| |
| |
def prepare_classifier_input(panorama, centroids: list, window_size=(250, 250)) -> list:
    """
    Extracts square image patches centered at each given centroid from a grayscale panoramic SEM image.

    Each patch is cut out at exactly ``window_size`` pixels (no resizing is
    performed); centroids whose window would extend past the image border are
    skipped with a warning. Patches are normalized from [0, 255] to [-1, 1]
    and replicated to 3 channels for classification networks that expect
    color input.

    Parameters
    ----------
    panorama : PIL.Image.Image or np.ndarray
        Input SEM image. Should be a 2D array (H, W) or a 3D array (H, W, 1)
        representing grayscale data, or a PIL Image object.

    centroids : list of [int, int]
        List of (y, x) coordinates marking the centers of regions of interest.
        These are typically damage sites identified in preprocessing
        (e.g., clustering).

    window_size : sequence of int, optional
        Size [height, width] of each extracted image patch. Defaults to (250, 250).

    Returns
    -------
    list of np.ndarray
        List of extracted and normalized 3-channel float32 patches, each with
        shape (height, width, 3). Only centroids that allow full window
        extraction within image bounds are used.

    Raises
    ------
    ValueError
        If ``panorama`` is neither a PIL Image nor a NumPy array.
    """
    logging.info(f"prepare_classifier_input: Input panorama type: {type(panorama)}")

    # Normalize the panorama to a single-channel NumPy array.
    # (ndarray checked first so the PIL branch is never touched for array input.)
    if isinstance(panorama, np.ndarray):
        if panorama.ndim == 3 and panorama.shape[2] in [3, 4]:
            panorama_array = np.mean(panorama, axis=2).astype(panorama.dtype)
            logging.info("prepare_classifier_input: Converted multi-channel NumPy array to grayscale.")
        else:
            panorama_array = panorama
            logging.info("prepare_classifier_input: Panorama is already a suitable NumPy array.")
    elif isinstance(panorama, Image.Image):
        if panorama.mode != 'L':
            # Collapse any multi-channel/palette mode (RGB, RGBA, P, ...) to
            # 8-bit grayscale; previously only 'RGB' was handled.
            panorama_array = np.array(panorama.convert('L'))
            logging.info("prepare_classifier_input: Converted RGB PIL Image to grayscale NumPy array.")
        else:
            panorama_array = np.array(panorama)
            logging.info("prepare_classifier_input: Converted PIL Image to grayscale NumPy array.")
    else:
        logging.error("prepare_classifier_input: Unsupported panorama format received. Expected PIL Image or NumPy array.")
        raise ValueError("Unsupported panorama format for classifier input.")

    # Ensure a trailing channel axis so indexing below is uniform.
    if panorama_array.ndim == 2:
        panorama_array = np.expand_dims(panorama_array, axis=-1)
        logging.info("prepare_classifier_input: Expanded 2D panorama to 3D (H,W,1).")

    H, W, _ = panorama_array.shape
    win_h, win_w = window_size
    images = []

    for (cy, cx) in centroids:
        # Centroids may arrive as floats (e.g. cluster means); snap to pixels.
        cy, cx = int(round(cy)), int(round(cx))

        x1 = int(cx - win_w / 2)
        y1 = int(cy - win_h / 2)
        x2 = x1 + win_w
        y2 = y1 + win_h

        # Only full windows are extracted; partial windows are skipped.
        if x1 < 0 or y1 < 0 or x2 > W or y2 > H:
            logging.warning(f"prepare_classifier_input: Skipping centroid ({cy},{cx}) as patch is out of bounds.")
            continue

        patch = panorama_array[y1:y2, x1:x2, 0].astype(np.float32)
        # Map [0, 255] intensities to [-1, 1].
        patch = patch * 2. / 255. - 1.

        # Replicate the grayscale patch into 3 identical channels.
        patch_color = np.repeat(patch[:, :, np.newaxis], 3, axis=2)
        images.append(patch_color)

    return images
|
|
|
|