@@ -1647,6 +1647,87 @@ def __init__(
1647
1647
self .all_params .extend ([W ])
1648
1648
1649
1649
1650
class GroupConv2d(Layer):
    """The :class:`GroupConv2d` class is 2D grouped convolution, see `here <https://blog.yani.io/filter-group-tutorial/>`__.

    Parameters
    --------------
    layer : :class:`Layer`
        Previous layer.
    n_filter : int
        The number of filters.
    filter_size : tuple of int
        The filter size (height, width).
    strides : tuple of int
        The stride step (height, width).
    n_group : int
        The number of groups. The number of input channels must be divisible by it.
    act : activation function
        The activation function of this layer.
    padding : str
        The padding algorithm type: "SAME" or "VALID".
    W_init : initializer
        The initializer for the weight matrix.
    b_init : initializer or None
        The initializer for the bias vector. If None, skip biases.
    W_init_args : dictionary
        The arguments for the weight matrix initializer.
    b_init_args : dictionary
        The arguments for the bias vector initializer.
    name : str
        A unique layer name.

    Raises
    ------
    ValueError
        If the number of input channels is not divisible by ``n_group``.
    """

    def __init__(
            self,
            layer=None,
            n_filter=32,
            filter_size=(3, 3),
            strides=(2, 2),
            n_group=2,
            act=tf.identity,
            padding='SAME',
            W_init=tf.truncated_normal_initializer(stddev=0.02),
            b_init=tf.constant_initializer(value=0.0),
            W_init_args=None,
            b_init_args=None,
            name='groupconv',
    ):  # Windaway
        # Use None sentinels instead of mutable default dicts so the
        # argument dictionaries are not shared across instances.
        if W_init_args is None:
            W_init_args = {}
        if b_init_args is None:
            b_init_args = {}

        Layer.__init__(self, name=name)
        self.inputs = layer.outputs

        channels = int(self.inputs.get_shape()[-1])
        # tf.split below requires an even channel split; fail early with a
        # clear message instead of an opaque TF shape error.
        if channels % n_group != 0:
            raise ValueError(
                "GroupConv2d %s: number of input channels (%d) must be divisible by n_group (%d)" % (name, channels, n_group))

        def _group_conv(i, k):
            # Plain 2D convolution applied to one group's inputs/weights.
            return tf.nn.conv2d(i, k, strides=[1, strides[0], strides[1], 1], padding=padding)

        with tf.variable_scope(name):
            # Each group convolves channels // n_group input channels.
            # NOTE: integer division is required -- ``/`` yields a float
            # under Python 3 and breaks the get_variable shape argument.
            We = tf.get_variable(
                name='W',
                shape=[filter_size[0], filter_size[1], channels // n_group, n_filter],
                initializer=W_init,
                dtype=D_TYPE,
                trainable=True,
                **W_init_args)
            if b_init:
                bi = tf.get_variable(name='b', shape=n_filter, initializer=b_init, dtype=D_TYPE, trainable=True, **b_init_args)
            if n_group == 1:
                # Degenerate case: a single group is an ordinary convolution.
                conv = _group_conv(self.inputs, We)
            else:
                # Split inputs and weights along the channel axis, convolve
                # each group independently, then re-concatenate the outputs.
                inputGroups = tf.split(axis=3, num_or_size_splits=n_group, value=self.inputs)
                weightsGroups = tf.split(axis=3, num_or_size_splits=n_group, value=We)
                convGroups = [_group_conv(i, k) for i, k in zip(inputGroups, weightsGroups)]
                conv = tf.concat(axis=3, values=convGroups)
            if b_init:
                conv = tf.add(conv, bi, name='add')

        self.outputs = act(conv)
        # Propagate the bookkeeping collections from the previous layer and
        # register this layer's output and parameters.
        self.all_layers = list(layer.all_layers)
        self.all_params = list(layer.all_params)
        self.all_drop = dict(layer.all_drop)
        self.all_layers.append(self.outputs)
        if b_init:
            self.all_params.extend([We, bi])
        else:
            self.all_params.append(We)
1729
+
1730
+
1650
1731
# Alias
1651
1732
AtrousConv1dLayer = atrous_conv1d
1652
1733
Conv1d = conv1d
0 commit comments