forked from ChunML/ssd-tf2
-
Notifications
You must be signed in to change notification settings - Fork 4
Expand file tree
/
Copy pathlayers.py
More file actions
executable file
·116 lines (91 loc) · 3.87 KB
/
layers.py
File metadata and controls
executable file
·116 lines (91 loc) · 3.87 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
import tensorflow as tf
import tensorflow.keras.layers as layers
from tensorflow.keras import Sequential
def create_vgg16_layers():
    """Build the VGG16 backbone for SSD as two functional Keras models.

    Returns:
        tuple: ``(vgg16_conv4, vgg16_conv7)`` where the first model covers
        the VGG16 conv layers through conv5_3 (input: H x W x 3) and the
        second holds the modified pool5 plus the atrous fc6/fc7
        convolutions (input: H x W x 512).
    """
    def _build_model(layer_seq, in_channels):
        # Wire the layers sequentially through a functional-API graph.
        inp = layers.Input(shape=[None, None, in_channels])
        h = inp
        for lyr in layer_seq:
            h = lyr(h)
        return tf.keras.Model(inp, h)

    def _relu_conv3(filters):
        return layers.Conv2D(filters, 3, padding='same', activation='relu')

    # Blocks 1-4: (filters, number of conv layers), each followed by a pool.
    backbone = []
    for filters, depth in ((64, 2), (128, 2), (256, 3), (512, 3)):
        backbone.extend(_relu_conv3(filters) for _ in range(depth))
        backbone.append(layers.MaxPool2D(2, 2, padding='same'))
    # Block 5 convs (conv5_1..conv5_3); pool5 lives in the second model.
    backbone.extend(_relu_conv3(512) for _ in range(3))
    vgg16_conv4 = _build_model(backbone, 3)

    fc_as_conv = [
        # Difference from original VGG16:
        # 5th maxpool layer has kernel size = 3 and stride = 1
        layers.MaxPool2D(3, 1, padding='same'),
        # atrous conv2d for 6th block
        layers.Conv2D(1024, 3, padding='same',
                      dilation_rate=6, activation='relu'),
        layers.Conv2D(1024, 1, padding='same', activation='relu'),
    ]
    vgg16_conv7 = _build_model(fc_as_conv, 512)

    return vgg16_conv4, vgg16_conv7
def create_extra_layers():
    """Create the extra SSD feature layers (8th to 11th blocks).

    Returns:
        tf.keras.Model: takes a H x W x 1024 feature map (output of the
        fc7 conv) and applies four 1x1-then-3x3 conv pairs; the two
        final 3x3 convs use the default 'valid' padding, shrinking the
        spatial size, while the earlier pairs downsample with stride 2.
    """
    # (filters, kernel_size, extra Conv2D kwargs) — one entry per layer.
    conv_specs = (
        (256, 1, {}),
        (512, 3, {'strides': 2, 'padding': 'same'}),
        (128, 1, {}),
        (256, 3, {'strides': 2, 'padding': 'same'}),
        (128, 1, {}),
        (256, 3, {}),
        (128, 1, {}),
        (256, 3, {}),
    )
    inp = layers.Input(shape=[None, None, 1024])
    h = inp
    for filters, ksize, kwargs in conv_specs:
        h = layers.Conv2D(filters, ksize, activation='relu', **kwargs)(h)
    return tf.keras.Model(inp, h)
def create_conf_head_layers(num_classes):
    """Create the per-feature-map classification heads.

    Args:
        num_classes: number of object classes (including background).

    Returns:
        list: six Conv2D layers, one per SSD feature map; each outputs
        ``anchors_per_cell * num_classes`` channels. The last feature map
        is 1x1 spatially, so its head uses a 1x1 kernel without padding.
    """
    # Anchor boxes per cell for the 4th, 7th, 8th, 9th and 10th blocks.
    anchors_per_cell = (4, 6, 6, 6, 4)
    heads = [
        layers.Conv2D(n * num_classes, kernel_size=3, padding='same')
        for n in anchors_per_cell
    ]
    # Final (1x1) feature map: 4 anchors, pointwise conv.
    heads.append(layers.Conv2D(4 * num_classes, kernel_size=1))
    return heads
def create_loc_head_layers():
    """Create the per-feature-map box-regression heads.

    Returns:
        list: six Conv2D layers mirroring the confidence heads; each
        outputs ``anchors_per_cell * 4`` channels (4 box offsets per
        anchor). The final head uses a 1x1 kernel for the 1x1 feature map.
    """
    # Same anchor layout as the confidence heads; 4 coordinates per box.
    anchors_per_cell = (4, 6, 6, 6, 4)
    heads = [
        layers.Conv2D(n * 4, kernel_size=3, padding='same')
        for n in anchors_per_cell
    ]
    heads.append(layers.Conv2D(4 * 4, kernel_size=1))
    return heads