Skip to content

Commit a86008e

Browse files
authored
Merge pull request #467 from tensorlayer/release-1.8.4rc0
[release] 1.8.4rc1
2 parents 60cfcb1 + cd5e1e9 commit a86008e

File tree

11 files changed

+38
-30
lines changed

11 files changed

+38
-30
lines changed

README.md

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -24,6 +24,7 @@ TensorLayer is a deep learning and reinforcement learning library on top of [Ten
2424
- Useful links: [Documentation](http://tensorlayer.readthedocs.io), [Examples](http://tensorlayer.readthedocs.io/en/latest/user/example.html), [中文文档](https://tensorlayercn.readthedocs.io), [中文书](http://www.broadview.com.cn/book/5059)
2525

2626
# News
27+
* [05 Apr] Release [models APIs](http://tensorlayer.readthedocs.io/en/latest/modules/models.html#) for well-known pretrained networks.
2728
* [18 Mar] Release experimental APIs for binary networks.
2829
* [18 Jan] [《深度学习:一起玩转TensorLayer》](http://www.broadview.com.cn/book/5059) (Deep Learning using TensorLayer)
2930
* [17 Dec] Release experimental APIs for distributed training (by [TensorPort](https://tensorport.com)). See [tiny example](https://github.com/zsdonghao/tensorlayer/blob/master/example/tutorial_mnist_distributed.py).

docs/conf.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -67,9 +67,9 @@
6767
# built documents.
6868
#
6969
# The short X.Y version.
70-
version = '1.8.4rc0'
70+
version = '1.8.4rc1'
7171
# The full version, including alpha/beta/rc tags.
72-
release = '1.8.4rc0'
72+
release = '1.8.4rc1'
7373

7474
# The language for content autogenerated by Sphinx. Refer to documentation
7575
# for a list of supported languages.
@@ -143,7 +143,7 @@
143143
# The name for this set of Sphinx documents.
144144
# "<project> v<release> documentation" by default.
145145
#
146-
# html_title = 'TensorLayer v1.8.4rc0'
146+
# html_title = 'TensorLayer v1.8.4rc1'
147147

148148
# A shorter title for the navigation bar. Default is the same as html_title.
149149
#

example/tutorial_mnist.py

Lines changed: 18 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -119,13 +119,13 @@ def main_test_layers(model='relu'):
119119
n_batch += 1
120120
print(" val loss: %f" % (val_loss / n_batch))
121121
print(" val acc: %f" % (val_acc / n_batch))
122-
try:
123-
# You can visualize the weight of 1st hidden layer as follow.
124-
tl.vis.draw_weights(network.all_params[0].eval(), second=10, saveable=True, shape=[28, 28], name='w1_' + str(epoch + 1), fig_idx=2012)
125-
# You can also save the weight of 1st hidden layer to .npz file.
126-
# tl.files.save_npz([network.all_params[0]] , name='w1'+str(epoch+1)+'.npz')
127-
except: # pylint: disable=bare-except
128-
print("You should change vis.draw_weights(), if you want to save the feature images for different dataset")
122+
# try:
123+
# # You can visualize the weight of 1st hidden layer as follow.
124+
# tl.vis.draw_weights(network.all_params[0].eval(), second=10, saveable=True, shape=[28, 28], name='w1_' + str(epoch + 1), fig_idx=2012)
125+
# # You can also save the weight of 1st hidden layer to .npz file.
126+
# # tl.files.save_npz([network.all_params[0]] , name='w1'+str(epoch+1)+'.npz')
127+
# except: # pylint: disable=bare-except
128+
# print("You should change vis.draw_weights(), if you want to save the feature images for different dataset")
129129

130130
print('Evaluation')
131131
test_loss, test_acc, n_batch = 0, 0, 0
@@ -306,11 +306,11 @@ def main_test_stacked_denoise_AE(model='relu'):
306306
n_batch += 1
307307
print(" val loss: %f" % (val_loss / n_batch))
308308
print(" val acc: %f" % (val_acc / n_batch))
309-
try:
310-
# visualize the 1st hidden layer during fine-tune
311-
tl.vis.draw_weights(network.all_params[0].eval(), second=10, saveable=True, shape=[28, 28], name='w1_' + str(epoch + 1), fig_idx=2012)
312-
except: # pylint: disable=bare-except
313-
print("You should change vis.draw_weights(), if you want to save the feature images for different dataset")
309+
# try:
310+
# # visualize the 1st hidden layer during fine-tune
311+
# tl.vis.draw_weights(network.all_params[0].eval(), second=10, saveable=True, shape=[28, 28], name='w1_' + str(epoch + 1), fig_idx=2012)
312+
# except: # pylint: disable=bare-except
313+
# print("You should change vis.draw_weights(), if you want to save the feature images for different dataset")
314314

315315
print('Evaluation')
316316
test_loss, test_acc, n_batch = 0, 0, 0
@@ -451,10 +451,10 @@ def main_test_cnn_layer():
451451
n_batch += 1
452452
print(" val loss: %f" % (val_loss / n_batch))
453453
print(" val acc: %f" % (val_acc / n_batch))
454-
try:
455-
tl.vis.CNN2d(network.all_params[0].eval(), second=10, saveable=True, name='cnn1_' + str(epoch + 1), fig_idx=2012)
456-
except: # pylint: disable=bare-except
457-
print("You should change vis.CNN(), if you want to save the feature images for different dataset")
454+
# try:
455+
# tl.vis.CNN2d(network.all_params[0].eval(), second=10, saveable=True, name='cnn1_' + str(epoch + 1), fig_idx=2012)
456+
# except: # pylint: disable=bare-except
457+
# print("You should change vis.CNN(), if you want to save the feature images for different dataset")
458458

459459
print('Evaluation')
460460
test_loss, test_acc, n_batch = 0, 0, 0
@@ -474,7 +474,7 @@ def main_test_cnn_layer():
474474
sess = tf.InteractiveSession()
475475

476476
# Dropout and Dropconnect
477-
main_test_layers(model='relu') # model = relu, dropconnect
477+
# main_test_layers(model='relu') # model = relu, dropconnect
478478

479479
# Single Denoising Autoencoder
480480
# main_test_denoise_AE(model='sigmoid') # model = relu, sigmoid
@@ -483,4 +483,4 @@ def main_test_cnn_layer():
483483
# main_test_stacked_denoise_AE(model='relu') # model = relu, sigmoid
484484

485485
# CNN
486-
# main_test_cnn_layer()
486+
main_test_cnn_layer()

setup.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,7 @@
1010

1111
setup(
1212
name="tensorlayer",
13-
version="1.8.4rc0",
13+
version="1.8.4rc1",
1414
include_package_data=True,
1515
author='TensorLayer Contributors',
1616
author_email='[email protected]',

tensorlayer/__init__.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,7 @@
2424
act = activation
2525
vis = visualize
2626

27-
__version__ = "1.8.4rc0"
27+
__version__ = "1.8.4rc1"
2828

2929
global_flag = {}
3030
global_dict = {}

tensorlayer/layers/core.py

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1387,8 +1387,6 @@ def __init__(
13871387
# self.all_layers = list(layer.all_layers)
13881388
# self.all_params = list(layer.all_params)
13891389
# self.all_drop = dict(layer.all_drop)
1390-
# self.all_drop.update({LayersConfig.set_keep[name]: keep})
1391-
# self.all_layers.append(self.outputs)
1392-
# self.all_params.extend([W, b])
1393-
1390+
self.all_drop.update({LayersConfig.set_keep[name]: keep})
13941391
self.all_layers.append(self.outputs)
1392+
self.all_params.extend([W, b])

tensorlayer/models/__init__.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
"""A collection of pre-defined well-known models."""
1+
# """A collection of pre-defined well-known models."""
22

33
from .vgg16 import VGG16
44
from .squeezenetv1 import SqueezeNetV1

tensorlayer/models/mobilenetv1.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -32,6 +32,7 @@ class MobileNetV1(Layer):
3232
Examples
3333
---------
3434
Classify ImageNet classes, see `tutorial_models_mobilenetv1.py <https://github.com/tensorlayer/tensorlayer/blob/master/example/tutorial_models_mobilenetv1.py>__`
35+
3536
>>> x = tf.placeholder(tf.float32, [None, 224, 224, 3])
3637
>>> # get the whole model
3738
>>> net = tl.models.MobileNetV1(x)
@@ -42,6 +43,7 @@ class MobileNetV1(Layer):
4243
>>> probs = tf.nn.softmax(net.outputs)
4344
4445
Extract features and Train a classifier with 100 classes
46+
4547
>>> x = tf.placeholder(tf.float32, [None, 224, 224, 3])
4648
>>> # get model without the last layer
4749
>>> cnn = tl.models.MobileNetV1(x, end_with='reshape')
@@ -57,6 +59,7 @@ class MobileNetV1(Layer):
5759
>>> train_params = tl.layers.get_variables_with_name('output')
5860
5961
Reuse model
62+
6063
>>> x1 = tf.placeholder(tf.float32, [None, 224, 224, 3])
6164
>>> x2 = tf.placeholder(tf.float32, [None, 224, 224, 3])
6265
>>> # get VGG without the last layer

tensorlayer/models/squeezenetv1.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -32,6 +32,7 @@ class SqueezeNetV1(Layer):
3232
Examples
3333
---------
3434
Classify ImageNet classes, see `tutorial_models_squeezenetv1.py <https://github.com/tensorlayer/tensorlayer/blob/master/example/tutorial_models_squeezenetv1.py>__`
35+
3536
>>> x = tf.placeholder(tf.float32, [None, 224, 224, 3])
3637
>>> # get the whole model
3738
>>> net = tl.models.SqueezeNetV1(x)
@@ -42,6 +43,7 @@ class SqueezeNetV1(Layer):
4243
>>> probs = tf.nn.softmax(net.outputs)
4344
4445
Extract features and Train a classifier with 100 classes
46+
4547
>>> x = tf.placeholder(tf.float32, [None, 224, 224, 3])
4648
>>> # get model without the last layer
4749
>>> cnn = tl.models.SqueezeNetV1(x, end_with='fire9')
@@ -57,6 +59,7 @@ class SqueezeNetV1(Layer):
5759
>>> train_params = tl.layers.get_variables_with_name('output')
5860
5961
Reuse model
62+
6063
>>> x1 = tf.placeholder(tf.float32, [None, 224, 224, 3])
6164
>>> x2 = tf.placeholder(tf.float32, [None, 224, 224, 3])
6265
>>> # get VGG without the last layer

tensorlayer/models/vgg16.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -245,6 +245,7 @@ class VGG16(VGG16Base):
245245
Examples
246246
---------
247247
Classify ImageNet classes with VGG16, see `tutorial_models_vgg16.py <https://github.com/tensorlayer/tensorlayer/blob/master/example/tutorial_models_vgg16.py>__`
248+
248249
>>> x = tf.placeholder(tf.float32, [None, 224, 224, 3])
249250
>>> # get the whole model
250251
>>> vgg = tl.models.VGG16(x)
@@ -255,6 +256,7 @@ class VGG16(VGG16Base):
255256
>>> probs = tf.nn.softmax(vgg.outputs)
256257
257258
Extract features with VGG16 and Train a classifier with 100 classes
259+
258260
>>> x = tf.placeholder(tf.float32, [None, 224, 224, 3])
259261
>>> # get VGG without the last layer
260262
>>> vgg = tl.models.VGG16(x, end_with='fc2_relu')
@@ -269,6 +271,7 @@ class VGG16(VGG16Base):
269271
>>> train_params = tl.layers.get_variables_with_name('out')
270272
271273
Reuse model
274+
272275
>>> x1 = tf.placeholder(tf.float32, [None, 224, 224, 3])
273276
>>> x2 = tf.placeholder(tf.float32, [None, 224, 224, 3])
274277
>>> # get VGG without the last layer

tests/test_layers_core.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -175,8 +175,8 @@
175175
if len(net.all_layers) != 2:
176176
raise Exception("layers dont match")
177177

178-
if len(net.all_params) != 2:
178+
if len(net.all_params) != 4:
179179
raise Exception("params dont match")
180180

181-
if net.count_params() != 78500:
181+
if net.count_params() != 88600:
182182
raise Exception("params dont match")

0 commit comments

Comments
 (0)