Skip to content

Commit ba71d18

Browse files
zsdonghao
authored and wagamamaz committed
release Binary Nets, release 1.8.1 (#423)
* update bnn cnn, htanh * bconv example * release binary * release 1.8.2
1 parent 46df7a7 commit ba71d18

File tree

8 files changed

+98
-21
lines changed

8 files changed

+98
-21
lines changed

docs/conf.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -67,9 +67,9 @@
6767
# built documents.
6868
#
6969
# The short X.Y version.
70-
version = '1.8.1'
70+
version = '1.8.2'
7171
# The full version, including alpha/beta/rc tags.
72-
release = '1.8.1'
72+
release = '1.8.2'
7373

7474
# The language for content autogenerated by Sphinx. Refer to documentation
7575
# for a list of supported languages.
@@ -143,7 +143,7 @@
143143
# The name for this set of Sphinx documents.
144144
# "<project> v<release> documentation" by default.
145145
#
146-
# html_title = 'TensorLayer v1.8.1'
146+
# html_title = 'TensorLayer v1.8.2'
147147

148148
# A shorter title for the navigation bar. Default is the same as html_title.
149149
#

docs/modules/activation.rst

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -31,6 +31,7 @@ For more complex activation, TensorFlow API will be required.
3131
leaky_relu
3232
swish
3333
sign
34+
hard_tanh
3435
pixel_wise_softmax
3536

3637
Identity
@@ -53,6 +54,10 @@ Sign
5354
---------------------
5455
.. autofunction:: sign
5556

57+
Hard Tanh
58+
---------------------
59+
.. autofunction:: hard_tanh
60+
5661
Pixel-wise softmax
5762
--------------------
5863
.. autofunction:: pixel_wise_softmax

docs/modules/layers.rst

Lines changed: 38 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -152,7 +152,7 @@ At the end, for a layer with parameters, we also append the parameters into ``al
152152
name ='simple_dense',
153153
):
154154
# check layer name (fixed)
155-
Layer.__init__(self, name=name)
155+
Layer.__init__(self, layer=layer, name=name)
156156
157157
# the input of this layer is the output of previous layer (fixed)
158158
self.inputs = layer.outputs
@@ -169,11 +169,6 @@ At the end, for a layer with parameters, we also append the parameters into ``al
169169
# tensor operation
170170
self.outputs = act(tf.matmul(self.inputs, W) + b)
171171
172-
# get stuff from previous layer (fixed)
173-
self.all_layers = list(layer.all_layers)
174-
self.all_params = list(layer.all_params)
175-
self.all_drop = dict(layer.all_drop)
176-
177172
# update layer (customized)
178173
self.all_layers.extend( [self.outputs] )
179174
self.all_params.extend( [W, b] )
@@ -336,6 +331,11 @@ Layer list
336331

337332
SlimNetsLayer
338333

334+
BinaryDenseLayer
335+
BinaryConv2d
336+
SignLayer
337+
ScaleLayer
338+
339339
PReluLayer
340340

341341
MultiplexerLayer
@@ -799,6 +799,38 @@ see `Slim-model <https://github.com/tensorflow/models/tree/master/research/slim>
799799
.. autoclass:: KerasLayer
800800

801801

802+
Binary Nets
803+
------------------
804+
805+
Read Me
806+
^^^^^^^^^^^^^^
807+
808+
This is an experimental API package for building Binary Nets.
809+
We are using matrix multiplication rather than add-minus and bit-count operation at the moment.
810+
Therefore, these APIs will not speed up inference. For production, you can train the model via TensorLayer and deploy it into another customized C/C++ implementation (we will probably provide users an extra C/C++ binary-net framework that can load models from TensorLayer).
811+
812+
Note that these experimental APIs may change at any time.
813+
814+
Binarized Dense
815+
^^^^^^^^^^^^^^^^^
816+
.. autoclass:: BinaryDenseLayer
817+
818+
819+
Binarized Conv2d
820+
^^^^^^^^^^^^^^^^^^
821+
.. autoclass:: BinaryConv2d
822+
823+
824+
Sign
825+
^^^^^^^^^^^^^^
826+
.. autoclass:: SignLayer
827+
828+
829+
Scale
830+
^^^^^^^^^^^^^^
831+
.. autoclass:: ScaleLayer
832+
833+
802834
Parametric activation layer
803835
---------------------------
804836

example/tutorial_binarynet_mnist_cnn.py

Lines changed: 14 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,7 @@
77

88
X_train, y_train, X_val, y_val, X_test, y_test = \
99
tl.files.load_mnist_dataset(shape=(-1, 28, 28, 1))
10+
# X_train, y_train, X_test, y_test = tl.files.load_cropped_svhn(include_extra=False)
1011

1112
sess = tf.InteractiveSession()
1213

@@ -17,25 +18,29 @@
1718

1819

1920
def model(x, is_train=True, reuse=False):
21+
# In BNN, all the layers inputs are binary, with the exception of the first layer.
22+
# ref: https://github.com/itayhubara/BinaryNet.tf/blob/master/models/BNN_cifar10.py
2023
with tf.variable_scope("binarynet", reuse=reuse):
2124
net = tl.layers.InputLayer(x, name='input')
2225
net = tl.layers.BinaryConv2d(net, 32, (5, 5), (1, 1), padding='SAME', name='bcnn1')
2326
net = tl.layers.MaxPool2d(net, (2, 2), (2, 2), padding='SAME', name='pool1')
27+
net = tl.layers.BatchNormLayer(net, act=tl.act.htanh, is_train=is_train, name='bn1')
2428

25-
net = tl.layers.BatchNormLayer(net, is_train=is_train, name='bn')
26-
net = tl.layers.SignLayer(net, name='sign2')
29+
net = tl.layers.SignLayer(net)
2730
net = tl.layers.BinaryConv2d(net, 64, (5, 5), (1, 1), padding='SAME', name='bcnn2')
2831
net = tl.layers.MaxPool2d(net, (2, 2), (2, 2), padding='SAME', name='pool2')
32+
net = tl.layers.BatchNormLayer(net, act=tl.act.htanh, is_train=is_train, name='bn2')
2933

30-
net = tl.layers.SignLayer(net, name='sign2')
3134
net = tl.layers.FlattenLayer(net, name='flatten')
32-
net = tl.layers.DropoutLayer(net, 0.5, True, is_train, name='drop1')
33-
# net = tl.layers.DenseLayer(net, 256, act=tf.nn.relu, name='dense')
35+
net = tl.layers.DropoutLayer(net, 0.8, True, is_train, name='drop1')
36+
net = tl.layers.SignLayer(net)
3437
net = tl.layers.BinaryDenseLayer(net, 256, name='dense')
35-
net = tl.layers.DropoutLayer(net, 0.5, True, is_train, name='drop2')
36-
# net = tl.layers.DenseLayer(net, 10, act=tf.identity, name='output')
38+
net = tl.layers.BatchNormLayer(net, act=tl.act.htanh, is_train=is_train, name='bn3')
39+
40+
net = tl.layers.DropoutLayer(net, 0.8, True, is_train, name='drop2')
41+
net = tl.layers.SignLayer(net)
3742
net = tl.layers.BinaryDenseLayer(net, 10, name='bout')
38-
# net = tl.layers.ScaleLayer(net, name='scale')
43+
net = tl.layers.BatchNormLayer(net, is_train=is_train, name='bno')
3944
return net
4045

4146

@@ -66,7 +71,7 @@ def model(x, is_train=True, reuse=False):
6671
n_epoch = 200
6772
print_freq = 5
6873

69-
# print(sess.run(net_test.all_params)) # print real value of parameters
74+
# print(sess.run(net_test.all_params)) # print real values of parameters
7075

7176
for epoch in range(n_epoch):
7277
start_time = time.time()

setup.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,7 @@
1010

1111
setup(
1212
name="tensorlayer",
13-
version="1.8.1",
13+
version="1.8.2",
1414
include_package_data=True,
1515
author='TensorLayer Contributors',
1616
author_email='[email protected]',

tensorlayer/__init__.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@
2323
act = activation
2424
vis = visualize
2525

26-
__version__ = "1.8.1"
26+
__version__ = "1.8.2"
2727

2828
global_flag = {}
2929
global_dict = {}

tensorlayer/activation.py

Lines changed: 23 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -168,6 +168,28 @@ def sign(x): # https://github.com/AngusG/tensorflow-xnor-bnn/blob/master/models
168168
# return tf.sign(x), grad
169169

170170

171+
def hard_tanh(x, name='htanh'):
172+
"""Hard tanh activation function.
173+
174+
Which is a ramp function with low bound of -1 and upper bound of 1, shortcut is ``htanh``.
175+
176+
Parameters
177+
----------
178+
x : Tensor
179+
input.
180+
name : str
181+
The function name (optional).
182+
183+
Returns
184+
-------
185+
Tensor
186+
A ``Tensor`` in the same type as ``x``.
187+
188+
"""
189+
# with tf.variable_scope("hard_tanh"):
190+
return tf.clip_by_value(x, -1, 1, name=name)
191+
192+
171193
@deprecated("2018-06-30", "This API will be deprecated soon as tf.nn.softmax can do the same thing.")
172194
def pixel_wise_softmax(x, name='pixel_wise_softmax'):
173195
"""Return the softmax outputs of images, every pixels have multiple label, the sum of a pixel is 1.
@@ -204,3 +226,4 @@ def pixel_wise_softmax(x, name='pixel_wise_softmax'):
204226
# Alias
205227
linear = identity
206228
lrelu = leaky_relu
229+
htanh = hard_tanh

tensorlayer/layers/binary.py

Lines changed: 13 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5,9 +5,9 @@
55

66
__all__ = [
77
'BinaryDenseLayer',
8+
'BinaryConv2d',
89
'SignLayer',
910
'ScaleLayer',
10-
'BinaryConv2d',
1111
]
1212

1313

@@ -142,6 +142,18 @@ class BinaryConv2d(Layer):
142142
name : str
143143
A unique layer name.
144144
145+
Examples
146+
---------
147+
>>> net = tl.layers.InputLayer(x, name='input')
148+
>>> net = tl.layers.BinaryConv2d(net, 32, (5, 5), (1, 1), padding='SAME', name='bcnn1')
149+
>>> net = tl.layers.MaxPool2d(net, (2, 2), (2, 2), padding='SAME', name='pool1')
150+
>>> net = tl.layers.BatchNormLayer(net, act=tl.act.htanh, is_train=is_train, name='bn1')
151+
...
152+
>>> net = tl.layers.SignLayer(net)
153+
>>> net = tl.layers.BinaryConv2d(net, 64, (5, 5), (1, 1), padding='SAME', name='bcnn2')
154+
>>> net = tl.layers.MaxPool2d(net, (2, 2), (2, 2), padding='SAME', name='pool2')
155+
>>> net = tl.layers.BatchNormLayer(net, act=tl.act.htanh, is_train=is_train, name='bn2')
156+
145157
"""
146158

147159
def __init__(

0 commit comments

Comments
 (0)