
Commit 20a03b2

fix SE-ResNeXt-152_parallel_exe
1 parent 3ce67e7 commit 20a03b2

File tree

1 file changed (+49, -36)


fluid/SE-ResNeXt-152/train_parallel_executor.py

Lines changed: 49 additions & 36 deletions
@@ -65,18 +65,12 @@ def conv_bn_layer(input, num_filters, filter_size, stride=1, groups=1,
         groups=groups,
         act=None,
         bias_attr=False)
-    return fluid.layers.batch_norm(input=conv, act=act, momentum=0.1)
+    return fluid.layers.batch_norm(input=conv, act=act)
 
 
 def squeeze_excitation(input, num_channels, reduction_ratio):
-    #pool = fluid.layers.pool2d(
-    #    input=input, pool_size=0, pool_type='avg', global_pooling=True)
-    conv = input
-    shape = conv.shape
-    reshape = fluid.layers.reshape(
-        x=conv, shape=[-1, shape[1], shape[2] * shape[3]])
-    pool = fluid.layers.reduce_mean(input=reshape, dim=2)
-
+    pool = fluid.layers.pool2d(
+        input=input, pool_size=0, pool_type='avg', global_pooling=True)
     squeeze = fluid.layers.fc(input=pool,
                               size=num_channels / reduction_ratio,
                               act='relu')
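The removed lines emulated global average pooling by flattening H×W with reshape and averaging with reduce_mean; the commit switches to pool2d with global_pooling=True. A minimal NumPy sketch (not part of the commit; the shape is illustrative) of why the two give the same per-channel means — pool2d keeps the 1x1 spatial dims, which fluid.layers.fc flattens by default:

    import numpy as np

    # Hypothetical NCHW activation; the shape is chosen only for illustration.
    x = np.random.rand(2, 64, 7, 7).astype('float32')

    # What the removed code computed: flatten H*W, then average over that axis
    # (fluid.layers.reshape followed by fluid.layers.reduce_mean).
    manual_pool = x.reshape(x.shape[0], x.shape[1], -1).mean(axis=2)

    # What pool2d(..., global_pooling=True) computes: a per-channel spatial mean.
    global_pool = x.mean(axis=(2, 3))

    assert np.allclose(manual_pool, global_pool)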
@@ -100,13 +94,11 @@ def shortcut(input, ch_out, stride):
 
 
 def bottleneck_block(input, num_filters, stride, cardinality, reduction_ratio):
-    # The number of first 1x1 convolutional channels for each bottleneck build block
-    # was halved to reduce the compution cost.
     conv0 = conv_bn_layer(
         input=input, num_filters=num_filters, filter_size=1, act='relu')
     conv1 = conv_bn_layer(
         input=conv0,
-        num_filters=num_filters * 2,
+        num_filters=num_filters,
         filter_size=3,
         stride=stride,
         groups=cardinality,
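In bottleneck_block the grouped 3x3 convolution now produces num_filters channels instead of num_filters * 2 (and the comment about the halved 1x1 stage goes away). A back-of-the-envelope weight count for that single layer, using the standard grouped-convolution formula — the helper and the example numbers below are mine, not from the commit:

    def grouped_conv3x3_weights(in_ch, out_ch, groups, k=3):
        # Weight count of a grouped conv: each output filter only sees
        # in_ch // groups input channels (conv_bn_layer disables bias).
        return out_ch * (in_ch // groups) * k * k

    num_filters, cardinality = 128, 64   # first stage of the 152-layer config
    old = grouped_conv3x3_weights(num_filters, num_filters * 2, cardinality)  # 4608
    new = grouped_conv3x3_weights(num_filters, num_filters, cardinality)      # 2304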
@@ -123,20 +115,44 @@ def bottleneck_block(input, num_filters, stride, cardinality, reduction_ratio):
     return fluid.layers.elementwise_add(x=short, y=scale, act='relu')
 
 
-def SE_ResNeXt152(input, class_dim):
-    cardinality = 64
-    reduction_ratio = 16
-    depth = [3, 8, 36, 3]
-    num_filters = [128, 256, 512, 1024]
-
-    conv = conv_bn_layer(
-        input=input, num_filters=64, filter_size=3, stride=2, act='relu')
-    conv = conv_bn_layer(
-        input=conv, num_filters=64, filter_size=3, stride=1, act='relu')
-    conv = conv_bn_layer(
-        input=conv, num_filters=128, filter_size=3, stride=1, act='relu')
-    conv = fluid.layers.pool2d(
-        input=conv, pool_size=3, pool_stride=2, pool_padding=1, pool_type='max')
+def SE_ResNeXt(input, class_dim, infer=False, layers=50):
+    supported_layers = [50, 152]
+    if layers not in supported_layers:
+        print("supported layers are", supported_layers, "but input layer is ",
+              layers)
+        exit()
+    if layers == 50:
+        cardinality = 32
+        reduction_ratio = 16
+        depth = [3, 4, 6, 3]
+        num_filters = [128, 256, 512, 1024]
+
+        conv = conv_bn_layer(
+            input=input, num_filters=64, filter_size=7, stride=2, act='relu')
+        conv = fluid.layers.pool2d(
+            input=conv,
+            pool_size=3,
+            pool_stride=2,
+            pool_padding=1,
+            pool_type='max')
+    elif layers == 152:
+        cardinality = 64
+        reduction_ratio = 16
+        depth = [3, 8, 36, 3]
+        num_filters = [128, 256, 512, 1024]
+
+        conv = conv_bn_layer(
+            input=input, num_filters=64, filter_size=3, stride=2, act='relu')
+        conv = conv_bn_layer(
+            input=conv, num_filters=64, filter_size=3, stride=1, act='relu')
+        conv = conv_bn_layer(
+            input=conv, num_filters=128, filter_size=3, stride=1, act='relu')
+        conv = fluid.layers.pool2d(
+            input=conv,
+            pool_size=3,
+            pool_stride=2,
+            pool_padding=1,
+            pool_type='max')
 
     for block in range(len(depth)):
         for i in range(depth[block]):
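The builder is renamed from SE_ResNeXt152 to SE_ResNeXt and now takes a layers argument (50 or 152) to pick the stem and block depths, plus an infer flag (used in the final hunk below) that bypasses dropout. A minimal usage sketch; the input variable, class count, and variant choices are illustrative assumptions, not part of this commit:

    import paddle.fluid as fluid

    # Hypothetical input variable; the real script defines its own data pipeline.
    image = fluid.layers.data(name='image', shape=[3, 224, 224], dtype='float32')

    # 152-layer variant, training graph (dropout stays active before the classifier).
    train_out = SE_ResNeXt(input=image, class_dim=1000, infer=False, layers=152)

    # 50-layer variant, inference graph (infer=True skips the dropout layer).
    infer_out = SE_ResNeXt(input=image, class_dim=1000, infer=True, layers=50)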
@@ -147,16 +163,13 @@ def SE_ResNeXt152(input, class_dim):
                 cardinality=cardinality,
                 reduction_ratio=reduction_ratio)
 
-    #pool = fluid.layers.pool2d(
-    #    input=conv, pool_size=0, pool_type='avg', global_pooling=True)
-    shape = conv.shape
-    reshape = fluid.layers.reshape(
-        x=conv, shape=[-1, shape[1], shape[2] * shape[3]])
-    pool = fluid.layers.reduce_mean(input=reshape, dim=2)
-    #yancanxiang: A drop out layer(with a drop ratio of 0.2) was inserted before the classifier layer.
-    dropout = fluid.layers.dropout(x=pool, dropout_prob=0.2)
-    # Classifier layer:
-    out = fluid.layers.fc(input=dropout, size=class_dim, act='softmax')
+    pool = fluid.layers.pool2d(
+        input=conv, pool_size=0, pool_type='avg', global_pooling=True)
+    if not infer:
+        drop = fluid.layers.dropout(x=pool, dropout_prob=0.2)
+    else:
+        drop = pool
+    out = fluid.layers.fc(input=drop, size=class_dim, act='softmax')
     return out
 
 