
Note

You are reading the documentation for MMSelfSup 0.x, which will be deprecated by the end of 2022. We recommend you upgrade to the MMSelfSup 1.0.0rc versions to enjoy the fruitful new features and better performance brought by OpenMMLab 2.0. Check out the changelog, code and documentation of MMSelfSup 1.0.0rc for more details.

Source code for mmselfsup.models.algorithms.base

# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from collections import OrderedDict

import torch
import torch.distributed as dist
from mmcv.runner import BaseModule, auto_fp16


class BaseModel(BaseModule, metaclass=ABCMeta):
    """Base model class for self-supervised learning."""

    def __init__(self, init_cfg=None):
        super(BaseModel, self).__init__(init_cfg)
        self.fp16_enabled = False

    @property
    def with_neck(self):
        return hasattr(self, 'neck') and self.neck is not None

    @property
    def with_head(self):
        return hasattr(self, 'head') and self.head is not None
    @abstractmethod
    def extract_feat(self, imgs):
        """Function to extract features from backbone.

        Args:
            imgs (Tensor): Input images. Typically these should be mean
                centered and std scaled.
        """
        pass
    @abstractmethod
    def forward_train(self, imgs, **kwargs):
        """
        Args:
            imgs (list[Tensor]): List of tensors. Typically these should be
                mean centered and std scaled.
            kwargs (keyword arguments): Specific to concrete implementation.
        """
        pass
    def forward_test(self, imgs, **kwargs):
        """
        Args:
            imgs (list[Tensor]): List of tensors. Typically these should be
                mean centered and std scaled.
            kwargs (keyword arguments): Specific to concrete implementation.
        """
        pass
    @auto_fp16(apply_to=('img', ))
    def forward(self, img, mode='train', **kwargs):
        """Forward function of model.

        Calls either forward_train, forward_test or extract_feat function
        according to the mode.
        """
        if mode == 'train':
            return self.forward_train(img, **kwargs)
        elif mode == 'test':
            return self.forward_test(img, **kwargs)
        elif mode == 'extract':
            return self.extract_feat(img)
        else:
            raise Exception(f'No such mode: {mode}')
    def _parse_losses(self, losses):
        """Parse the raw outputs (losses) of the network.

        Args:
            losses (dict): Raw output of the network, which usually contain
                losses and other necessary information.

        Returns:
            tuple[Tensor, dict]: (loss, log_vars), loss is the loss tensor
                which may be a weighted sum of all losses, log_vars contains
                all the variables to be sent to the logger.
        """
        log_vars = OrderedDict()
        for loss_name, loss_value in losses.items():
            if isinstance(loss_value, torch.Tensor):
                log_vars[loss_name] = loss_value.mean()
            elif isinstance(loss_value, list):
                log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value)
            elif isinstance(loss_value, dict):
                for name, value in loss_value.items():
                    log_vars[name] = value
            else:
                raise TypeError(
                    f'{loss_name} is not a tensor or list of tensors')

        loss = sum(_value for _key, _value in log_vars.items()
                   if 'loss' in _key)

        log_vars['loss'] = loss
        for loss_name, loss_value in log_vars.items():
            # reduce loss when distributed training
            if dist.is_available() and dist.is_initialized():
                loss_value = loss_value.data.clone()
                dist.all_reduce(loss_value.div_(dist.get_world_size()))
            log_vars[loss_name] = loss_value.item()

        return loss, log_vars
    def train_step(self, data, optimizer):
        """The iteration step during training.

        This method defines an iteration step during training, except for
        the back propagation and optimizer updating, which are done in an
        optimizer hook. Note that in some complicated cases or models, the
        whole process including back propagation and optimizer updating are
        also defined in this method, such as GAN.

        Args:
            data (dict): The output of dataloader.
            optimizer (:obj:`torch.optim.Optimizer` | dict): The optimizer of
                runner is passed to ``train_step()``. This argument is unused
                and reserved.

        Returns:
            dict: Dict of outputs. The following fields are contained.

                - loss (torch.Tensor): A tensor for back propagation, which
                  can be a weighted sum of multiple losses.
                - log_vars (dict): Dict contains all the variables to be sent
                  to the logger.
                - num_samples (int): Indicates the batch size (when the model
                  is DDP, it means the batch size on each GPU), which is
                  used for averaging the logs.
        """
        losses = self(**data)
        loss, log_vars = self._parse_losses(losses)

        if isinstance(data['img'], list):
            num_samples = len(data['img'][0].data)
        else:
            num_samples = len(data['img'].data)

        outputs = dict(loss=loss, log_vars=log_vars, num_samples=num_samples)

        return outputs
    def val_step(self, data, optimizer):
        """The iteration step during validation.

        This method shares the same signature as :func:`train_step`, but is
        used during val epochs. Note that the evaluation after training epochs
        is not implemented with this method, but with an evaluation hook.
        """
        losses = self(**data)
        loss, log_vars = self._parse_losses(losses)

        if isinstance(data['img'], list):
            num_samples = len(data['img'][0].data)
        else:
            num_samples = len(data['img'].data)

        outputs = dict(loss=loss, log_vars=log_vars, num_samples=num_samples)

        return outputs
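
The two abstract methods above, extract_feat and forward_train, are what a concrete algorithm must implement; forward then dispatches to them (or to forward_test) according to the mode argument. Below is a minimal sketch, not part of the library, of such a subclass. The ToyAlgorithm name, its Conv2d backbone, Linear head and the loss_toy key are illustrative assumptions only.

import torch
import torch.nn as nn

from mmselfsup.models.algorithms.base import BaseModel


class ToyAlgorithm(BaseModel):
    """Hypothetical algorithm used only to illustrate the BaseModel API."""

    def __init__(self, init_cfg=None):
        super().__init__(init_cfg)
        self.backbone = nn.Conv2d(3, 8, 3, padding=1)  # stand-in backbone
        self.head = nn.Linear(8, 4)                    # stand-in head

    def extract_feat(self, img):
        # Backbone features, pooled to one vector per image.
        return self.backbone(img).mean(dim=(2, 3))

    def forward_train(self, img, **kwargs):
        feat = self.extract_feat(img)
        logits = self.head(feat)
        # Any key containing 'loss' is summed into the total loss
        # by _parse_losses.
        return dict(loss_toy=logits.pow(2).mean())


model = ToyAlgorithm()
img = torch.randn(2, 3, 32, 32)
losses = model(img, mode='train')    # dispatches to forward_train
feats = model(img, mode='extract')   # dispatches to extract_feat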
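
_parse_losses accepts a dict whose values are tensors, lists of tensors, or nested dicts of tensors; it averages each entry, sums every key containing 'loss' into the total loss, and, when distributed training is initialized, all-reduces the logged values. A single-process sketch of that behaviour, reusing the toy model above (the keys and numbers are made up):

raw = dict(
    loss_contrastive=torch.tensor(0.8),
    loss_aux=[torch.tensor(0.1), torch.tensor(0.3)],  # list -> sum of means
    acc=torch.tensor(0.75),                           # logged, not summed
)
loss, log_vars = model._parse_losses(raw)
# loss is a tensor of ~1.2: only keys containing 'loss' are summed.
# log_vars ~= {'loss_contrastive': 0.8, 'loss_aux': 0.4, 'acc': 0.75, 'loss': 1.2}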
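
train_step (and val_step) simply calls forward in 'train' mode on the unpacked data dict, parses the resulting losses, and reports the batch size as num_samples; back propagation and the optimizer update are performed later by an optimizer hook, which is why the optimizer argument is unused. A sketch of that flow with a plain-tensor batch (no DataContainer wrapping), again using the toy model above:

data = dict(img=torch.randn(2, 3, 32, 32))        # what the dataloader yields
outputs = model.train_step(data, optimizer=None)  # optimizer is unused/reserved
outputs['loss'].backward()     # normally done by the optimizer hook
print(outputs['log_vars'])     # e.g. {'loss_toy': ..., 'loss': ...}
print(outputs['num_samples'])  # 2, the batch size on this process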