@@ -416,6 +416,16 @@ def my_dropout(x):
 out = tf.reshape(conv2, [1, 2, 3, 6], 'reshaped')
 save(inp, out, 'reshape_nchw')
 ################################################################################
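+# Max-pool a [1, 5, 5, 3] input down to [1, 2, 2, 3] (12 values), reshape the
+# result to 1x1 spatial size, and apply a 1x1 convolution, which then acts as
+# a fully connected layer.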
+inp = tf.placeholder(tf.float32, [1, 5, 5, 3], 'input')
+out = tf.keras.layers.MaxPool2D((2, 2), 4, "SAME", name='pooling')(inp)
+reshape = tf.reshape(out, [-1, 1, 1, 12], 'reshaped')
+conv_filter = tf.get_variable('filter', [1, 1, 12, 4],
+                              initializer=tf.truncated_normal_initializer(),
+                              dtype=tf.float32)
+conv = tf.nn.conv2d(input=reshape, filters=conv_filter, strides=[1, 1, 1, 1],
+                    padding='SAME', name='conv2d')
+save(inp, conv, 'reshape_conv')
+################################################################################
 inp = tf.placeholder(tf.float32, [1, 6, 5, 3], 'input')
 conv = tf.layers.conv2d(inputs=inp, filters=3, kernel_size=[1, 1],
                         activation=tf.nn.relu,
@@ -792,6 +802,12 @@ def pad_depth(x, desired_channels):
 final_out = tf.math.add(out1, out2, name='tf_sum')
 save(inp, final_out, 'eltwise_add_vec')
 ################################################################################
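+# Element-wise multiplication of two branches whose shapes broadcast: the
+# strided max-pool yields [1, 1, 1, 3], which is multiplied against the
+# full-size [1, 4, 4, 3] ReLU branch.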
+inp = tf.placeholder(tf.float32, [1, 4, 4, 3], 'input')
+out1 = tf.keras.layers.MaxPool2D((2, 2), 4, "SAME", name="pooling")(inp)
+out2 = tf.keras.layers.ReLU(name="relu")(inp)
+final_out = tf.keras.layers.Multiply(name='tf_mul')([out1, out2])
+save(inp, final_out, 'eltwise_mul_vec')
+################################################################################
 inp = tf.placeholder(tf.float32, [None, 2, 3, 4], 'input')
 conv = tf.layers.conv2d(inp, filters=3, kernel_size=[1, 1])
 softmax = tf.contrib.slim.softmax(conv)