Note
You are reading the documentation for MMSelfSup 0.x, which will be deprecated by the end of 2022. We recommend you upgrade to the MMSelfSup 1.0.0rc versions to enjoy the fruitful new features and better performance brought by OpenMMLab 2.0. Check out the changelog, code, and documentation of MMSelfSup 1.0.0rc for more details.
Source code for mmselfsup.models.heads.simmim_head
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmcv.runner import BaseModule
from torch.nn import functional as F
from ..builder import HEADS
@HEADS.register_module()
class SimMIMHead(BaseModule):
    """Pretrain Head for SimMIM.

    Computes an L1 reconstruction loss restricted to the masked patches
    only, normalized by the number of masked pixels and the number of
    input channels.

    Args:
        patch_size (int): Patch size of each token.
        encoder_in_channels (int): Number of input channels for encoder.
    """

    def __init__(self, patch_size: int, encoder_in_channels: int) -> None:
        super(SimMIMHead, self).__init__()
        self.patch_size = patch_size
        self.encoder_in_channels = encoder_in_channels

    def forward(self, x: torch.Tensor, x_rec: torch.Tensor,
                mask: torch.Tensor) -> dict:
        """Compute the masked-pixel reconstruction loss.

        Args:
            x (torch.Tensor): Target image. Assumed shape is
                ``(B, C, H, W)`` — the per-channel normalization below
                relies on C == ``encoder_in_channels``.
            x_rec (torch.Tensor): Reconstructed image, same shape as ``x``.
            mask (torch.Tensor): Patch-level mask, assumed shape
                ``(B, H // patch_size, W // patch_size)`` with nonzero
                entries marking masked patches — confirm against caller.

        Returns:
            dict: A dict with the single key ``'loss'`` holding the
            scalar reconstruction loss.
        """
        losses = dict()
        # Upsample the patch-level mask to pixel resolution (repeat along
        # the two spatial dims) and add a channel dim so it broadcasts
        # against the (B, C, H, W) image tensors.
        mask = mask.repeat_interleave(self.patch_size, 1).repeat_interleave(
            self.patch_size, 2).unsqueeze(1).contiguous()
        # Per-pixel L1; averaged over masked pixels only. The 1e-5 epsilon
        # guards against division by zero when nothing is masked.
        loss_rec = F.l1_loss(x, x_rec, reduction='none')
        loss = (loss_rec * mask).sum() / (mask.sum() +
                                          1e-5) / self.encoder_in_channels
        losses['loss'] = loss
        return losses