| import torch.nn as nn |
| import torch.nn.functional as F |
| from fvcore.common.registry import Registry |
| import torch |
|
|
# Global registry mapping loss names to loss classes; losses defined outside
# this module register themselves here and are instantiated in Loss.__init__.
LOSS_REGISTRY = Registry("loss")
|
|
def og3d_loss(data_dict):
    """Single-target 3D grounding loss: cross-entropy over object candidates."""
    logits = data_dict["og3d_logits"]
    target = data_dict["tgt_object_id"].squeeze(1)
    return F.cross_entropy(logits, target)
|
|
|
|
def og3d_multi_loss(data_dict):
    """Multi-target 3D grounding loss: summed BCE-with-logits, averaged per sample."""
    logits = data_dict["og3d_logits"]
    targets = data_dict["tgt_object_id"].float()
    total = F.binary_cross_entropy_with_logits(logits, targets, reduction="sum")
    return total / float(targets.shape[0])
|
|
|
|
def txt_cls_multi_loss(data_dict):
    """Multi-label text classification loss: summed BCE-with-logits, averaged per sample."""
    logits = data_dict["txt_cls_logits"]
    targets = data_dict["tgt_object_label"].float()
    total = F.binary_cross_entropy_with_logits(logits, targets, reduction='sum')
    return total / float(targets.shape[0])
|
|
|
|
def obj_cls_raw_loss(data_dict):
    """Object classification loss on raw (pre-model) logits, averaged over valid objects."""
    # cross_entropy expects (batch, classes, objects), hence the permute.
    per_obj = F.cross_entropy(
        data_dict["obj_cls_raw_logits"].permute(0, 2, 1),
        data_dict["obj_labels"],
        reduction='none',
    )
    masks = data_dict["obj_masks"]
    return (per_obj * masks).sum() / masks.sum()
|
|
|
|
def obj_cls_pre_loss(data_dict):
    """Object classification loss on pre-fusion logits, averaged over valid objects."""
    # cross_entropy expects (batch, classes, objects), hence the permute.
    per_obj = F.cross_entropy(
        data_dict["obj_cls_pre_logits"].permute(0, 2, 1),
        data_dict["obj_labels"],
        reduction='none',
    )
    masks = data_dict["obj_masks"]
    return (per_obj * masks).sum() / masks.sum()
|
|
|
|
def obj_cls_post_loss(data_dict):
    """Object classification loss on post-fusion logits, averaged over valid objects."""
    # cross_entropy expects (batch, classes, objects), hence the permute.
    per_obj = F.cross_entropy(
        data_dict["obj_cls_post_logits"].permute(0, 2, 1),
        data_dict["obj_labels"],
        reduction='none',
    )
    masks = data_dict["obj_masks"]
    return (per_obj * masks).sum() / masks.sum()
|
|
|
|
def answer_loss(data_dict):
    """Answer classification loss: summed BCE-with-logits, averaged per sample."""
    scores = data_dict["answer_scores"]
    labels = data_dict["answer_label"].float()
    total = F.binary_cross_entropy_with_logits(scores, labels, reduction='sum')
    return total / scores.shape[0]
|
|
|
|
def lm_cls_loss(data_dict):
    """Masked-LM cross-entropy over text tokens; positions labeled -1 are ignored."""
    targets = data_dict["masked_lm_labels"]
    # Collapse leading dims so targets are 2-D (batch, seq).
    # NOTE(review): assumes the logits' batch dim already matches the collapsed
    # target batch dim when targets arrive 3-D — confirm against callers.
    if targets.dim() == 3:
        targets = targets.view(-1, targets.size(-1))
    logits = data_dict["txt_lm_cls_logits"].permute(0, 2, 1)
    return F.cross_entropy(logits, targets, ignore_index=-1)
|
|
|
|
def obj_cls_pre_loss_mask(data_dict):
    """Pre-fusion object classification loss over valid, semantically-masked objects only."""
    per_obj = F.cross_entropy(
        data_dict["obj_cls_pre_logits"].permute(0, 2, 1),
        data_dict["obj_labels"],
        reduction='none',
    )
    # Keep objects that are valid AND whose semantics were masked out.
    weight = data_dict["obj_masks"] * data_dict["obj_sem_masks"].logical_not()
    return (per_obj * weight).sum() / weight.sum()
|
|
|
|
def obj_cls_pre_loss_unmask(data_dict):
    """Pre-fusion object classification loss over valid, semantically-unmasked objects only."""
    per_obj = F.cross_entropy(
        data_dict["obj_cls_pre_logits"].permute(0, 2, 1),
        data_dict["obj_labels"],
        reduction='none',
    )
    # Keep objects that are valid AND whose semantics were left visible.
    weight = data_dict["obj_masks"] * data_dict["obj_sem_masks"]
    return (per_obj * weight).sum() / weight.sum()
|
|
|
|
def obj_cls_post_loss_mask(data_dict):
    """Post-fusion object classification loss over valid, semantically-masked objects only."""
    per_obj = F.cross_entropy(
        data_dict["obj_cls_post_logits"].permute(0, 2, 1),
        data_dict["obj_labels"],
        reduction='none',
    )
    # Keep objects that are valid AND whose semantics were masked out.
    weight = data_dict["obj_masks"] * data_dict["obj_sem_masks"].logical_not()
    return (per_obj * weight).sum() / weight.sum()
|
|
|
|
def obj_cls_post_loss_unmask(data_dict):
    """Post-fusion object classification loss over valid, semantically-unmasked objects only."""
    per_obj = F.cross_entropy(
        data_dict["obj_cls_post_logits"].permute(0, 2, 1),
        data_dict["obj_labels"],
        reduction='none',
    )
    # Keep objects that are valid AND whose semantics were left visible.
    weight = data_dict["obj_masks"] * data_dict["obj_sem_masks"]
    return (per_obj * weight).sum() / weight.sum()
|
|
|
|
def obj_cls_loss(data_dict, smoothing=0.3):
    """Label-smoothed object classification loss, averaged over valid objects.

    Args:
        data_dict: must contain "obj_logits" (batch, objects, classes),
            "obj_labels" (batch, objects), and "obj_masks" (batch, objects).
        smoothing: label-smoothing factor passed to cross_entropy.
    """
    per_obj = F.cross_entropy(
        data_dict["obj_logits"].permute(0, 2, 1),
        data_dict["obj_labels"],
        reduction='none',
        label_smoothing=smoothing,
    )
    masks = data_dict["obj_masks"]
    return (per_obj * masks).sum() / masks.sum()
|
|
|
|
def mse_loss(data_dict):
    """Mean squared error between predicted and target images."""
    diff = data_dict["pred_images"] - data_dict["target_images"]
    return diff.pow(2).mean()
|
|
class Loss(nn.Module):
    """Aggregates the configured loss functions and sums them into a total.

    Loss names from ``cfg.model.loss_list`` and ``cfg.model.vis_loss_list`` are
    resolved either from this module's globals (the plain functions above) or,
    failing that, constructed from ``LOSS_REGISTRY``.
    """

    def __init__(self, cfg, accelerator):
        # cfg: project config providing ``model.loss_list`` and
        #   ``model.vis_loss_list`` (lists of loss names).
        # accelerator: forwarded to registry-constructed losses.
        super().__init__()
        # Deduplicated union of training and visualization losses.
        self.all_keys = list(set(cfg.model.vis_loss_list + cfg.model.loss_list))
        # NOTE(review): selected_keys is stored but never read below —
        # forward() sums *all* losses in all_keys, so vis_loss_list entries
        # also contribute to total_loss. Confirm this is intended.
        self.selected_keys = cfg.model.loss_list

        self.loss_fn = {}
        for k in self.all_keys:
            if k in globals().keys():
                # Stateless function defined in this module.
                self.loss_fn[k] = globals()[k]
                print(f"Using {k} from loss.globals()")
            else:
                # Registry losses are instantiated with (cfg, accelerator);
                # setattr registers them on this nn.Module so any parameters
                # they own are tracked (the globals branch has no parameters).
                self.loss_fn[k] = LOSS_REGISTRY.get(k)(cfg, accelerator)
                setattr(self, k, self.loss_fn[k])
                print(f"Using {k} from Registry {LOSS_REGISTRY._name}")

    def forward(self, data_dict):
        """Run every configured loss on ``data_dict``.

        Returns:
            (total_loss, all_losses): the sum of every individual loss, and a
            dict of each named loss plus a ``"total_loss"`` entry.
        """
        all_losses = {}

        # Convenience: derive txt_cls_label from tgt_object_label when a text
        # classification loss is configured but the label was not set upstream.
        if 'txt_cls_loss' in self.loss_fn and 'txt_cls_label' not in data_dict:
            data_dict['txt_cls_label'] = data_dict["tgt_object_label"].squeeze(1)

        for k, fn in self.loss_fn.items():
            cur_loss = fn(data_dict)

            # A loss may return a dict of named sub-losses; merge those as-is.
            if isinstance(cur_loss, dict):
                all_losses.update(cur_loss)
            else:
                all_losses[k] = cur_loss

        # Sum of every entry collected above (vis-only losses included).
        total_loss = sum(all_losses.values())
        all_losses["total_loss"] = total_loss

        return total_loss, all_losses
|
|
|
|