proj_head.py
# ---------------------------------------------------------------
# Copyright (c) 2022 BIT-DA. All rights reserved.
# Licensed under the Apache License, Version 2.0
# ---------------------------------------------------------------

import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule

from ..builder import HEADS
from .decode_head_decorator import BaseDecodeHeadDecorator


@HEADS.register_module()
class ProjHead(BaseDecodeHeadDecorator):
    """Projection Head for feature dimension reduction in contrastive loss.

    Args:
        num_convs (int): Number of convs in the head. Default: 2.
        kernel_size (int): The kernel size for convs in the head. Default: 1.
        dilation (int): The dilation rate for convs in the head. Default: 1.
    """

    def __init__(self,
                 num_convs=2,
                 kernel_size=1,
                 dilation=1,
                 **kwargs):
        assert num_convs in (0, 1, 2) and dilation > 0 and isinstance(dilation, int)
        self.num_convs = num_convs
        self.kernel_size = kernel_size
        super(ProjHead, self).__init__(**kwargs)
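        # with num_convs == 0 the head is an identity mapping, so the input
        # and output feature dimensions must already agree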
        if num_convs == 0:
            assert self.in_channels == self.channels
        conv_padding = (kernel_size // 2) * dilation
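        # 'multiple_select' keeps the selected feature maps separate, so each
        # scale gets its own projection branch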
        if self.input_transform == 'multiple_select':
            convs = [[] for _ in range(len(self.in_channels))]
            for i in range(len(self.in_channels)):
                if num_convs > 1:
                    convs[i].append(
                        ConvModule(
                            self.in_channels[i],
                            self.in_channels[i],
                            kernel_size=kernel_size,
                            padding=conv_padding,
                            dilation=dilation,
                            conv_cfg=self.conv_cfg,
                            norm_cfg=self.norm_cfg,
                            act_cfg=self.act_cfg))
                convs[i].append(
                    ConvModule(
                        self.in_channels[i],
                        self.channels,
                        kernel_size=kernel_size,
                        padding=conv_padding,
                        dilation=dilation,
                        conv_cfg=self.conv_cfg,
                        norm_cfg=self.norm_cfg,
                        act_cfg=self.act_cfg))
            if num_convs == 0:
                self.convs = nn.ModuleList(
                    [nn.Identity() for _ in range(len(self.in_channels))])
            else:
                self.convs = nn.ModuleList(
                    [nn.Sequential(*convs[i]) for i in range(len(self.in_channels))])
        else:
            if self.input_transform == 'resize_concat':
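                # 'resize_concat' concatenates the selected inputs, so
                # in_channels is their sum; recover the per-stage width here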
                self.mid_channels = self.in_channels // len(self.in_index)
            else:
                self.mid_channels = self.in_channels
            convs = []
            if num_convs > 1:
                convs.append(
                    ConvModule(
                        self.in_channels,
                        self.mid_channels,
                        kernel_size=kernel_size,
                        padding=conv_padding,
                        dilation=dilation,
                        conv_cfg=self.conv_cfg,
                        norm_cfg=self.norm_cfg,
                        act_cfg=self.act_cfg))
            convs.append(
                ConvModule(
                    self.mid_channels,
                    self.channels,
                    kernel_size=kernel_size,
                    padding=conv_padding,
                    dilation=dilation,
                    conv_cfg=self.conv_cfg,
                    norm_cfg=self.norm_cfg,
                    act_cfg=self.act_cfg))
            if num_convs == 0:
                self.convs = nn.Identity()
            else:
                self.convs = nn.Sequential(*convs)

    def forward(self, inputs):
        """Forward function."""
        x = self._transform_inputs(inputs)
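        # L2-normalize along the channel dim so projected features lie on the
        # unit hypersphere, as expected by the contrastive loss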
        if isinstance(x, list):
            # multiple_select
            output = [F.normalize(self.convs[i](x[i]), p=2, dim=1) for i in range(len(x))]
        else:
            # resize_concat or single_select
            output = F.normalize(self.convs(x), p=2, dim=1)
        return output
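
# ---------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module): the head is
# built through the HEADS registry from an mmseg-style config. The kwargs
# below (in_channels, in_index, channels, input_transform, norm_cfg, act_cfg)
# follow the BaseDecodeHead convention and are assumptions here, not values
# taken from this repo's configs.
#
#     proj_head = dict(
#         type='ProjHead',
#         in_channels=[256, 512, 1024, 2048],
#         in_index=[0, 1, 2, 3],
#         channels=256,
#         input_transform='multiple_select',
#         num_convs=2,
#         kernel_size=1,
#         norm_cfg=dict(type='BN'),
#         act_cfg=dict(type='ReLU'))
#
# With input_transform='multiple_select', forward() returns one L2-normalized
# tensor per selected scale; otherwise it returns a single normalized tensor.
# ---------------------------------------------------------------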