diff --git a/.travis.yml b/.travis.yml
index d86c89a17..f8c24f22e 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -117,4 +117,3 @@ deploy:
       python: '3.6'
       condition: '$_TF_VERSION = 2.0.0a0'
       # condition: '$_TF_VERSION = 1.11.0'
-
diff --git a/CHANGELOG.md b/CHANGELOG.md
index bac7c65fe..24d2b0ee5 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -67,7 +67,14 @@ To release a new version, please update the changelog as followed:
-## [Unreleased]
+## [2.0.1] - 2019-05-17
+
+
+A maintenance release.
+
+### Changed
+- remove `tl.layers.initialize_global_variables(sess)` (PR #931)
+- support `trainable_weights` (PR #966)
 
 ### Added
 - Layer
@@ -78,29 +85,27 @@ To release a new version, please update the changelog as followed:
   - change `tl.layers.core`, `tl.models.core` (PR #966)
   - change `weights` into `all_weights`, `trainable_weights`, `nontrainable_weights`
 
+
 ### Dependencies Update
 - nltk>=3.3,<3.4 => nltk>=3.3,<3.5 (PR #892)
 - pytest>=3.6,<3.11 => pytest>=3.6,<4.1 (PR #889)
 - yapf>=0.22,<0.25 => yapf==0.25.0 (PR #896)
 - imageio==2.5.0 progressbar2==3.39.3 scikit-learn==0.21.0 scikit-image==0.15.0 scipy==1.2.1 wrapt==1.11.1 pymongo==3.8.0 sphinx==2.0.1 wrapt==1.11.1 opencv-python==4.1.0.25 requests==2.21.0 tqdm==4.31.1 lxml==4.3.3 pycodestyle==2.5.0 sphinx==2.0.1 yapf==0.27.0 (PR #967)
 
-### Deprecated
-
 ### Fixed
 - fix docs of models @zsdonghao #957
 - In `BatchNorm`, keep dimensions of mean and variance to suit `channels first` (PR #963)
 
-### Removed
-
-### Security
-
 ### Contributors
+- @warshallrho: #966
 - @zsdonghao: #931
 - @yd-yin: #963
 
-## [2.0.0-alpha] - 2019-05-04
+
+## [2.0.0] - 2019-05-04
+
+Too many PRs to list for this update; please check [here](https://github.com/tensorlayer/tensorlayer/releases/tag/2.0.0) for more details.
 
 ### Changed
 * update for TensorLayer 2.0.0 alpha version (PR #952)
@@ -119,6 +124,7 @@ To release a new version, please update the changelog as followed:
 - @ChrisWu1997
 - @warshallrho
 
+
 ## [1.11.1] - 2018-11-15
 
 ### Changed
diff --git a/README.md b/README.md
index bd2f074bb..edd24c926 100644
--- a/README.md
+++ b/README.md
@@ -62,7 +62,7 @@ and fine-tune. On the contrary, TensorLayer APIs are generally lightweight, flex
 Users often find it easy to start with the examples and tutorials, and then dive into TensorFlow seamlessly. In addition, TensorLayer does not create library lock-in through native supports for importing components from Keras.
 
-TensorLayer has a fast growing usage among top researchers and engineers, from universities like 
+TensorLayer has a fast growing usage among top researchers and engineers, from universities like
 Peking University, Imperial College London, UC Berkeley, Carnegie Mellon University, Stanford University, and
 University of Technology of Compiegne (UTC), and companies like Google, Microsoft, Alibaba, Tencent, Xiaomi, and Bloomberg.
diff --git a/examples/basic_tutorials/README.md b/examples/basic_tutorials/README.md
index b00f4e9a7..8b1378917 100644
--- a/examples/basic_tutorials/README.md
+++ b/examples/basic_tutorials/README.md
@@ -1,44 +1 @@
-Something you need to know:
-### 1. Static and dynamic model
-
-1) `tutorial_mnist_mlp_static.py`: static model
-
-2) `tutorial_mnist_mlp_dynamic.py`: dynamic model
-
-### 2. Switching Training and testing
-
-There are two ways to switch the training and testing mode:
-
-1 ) use Pytorch-like method, turn on and off the training/evaluation as follow:
-
-```python
-model.train() # enable dropout, batch norm decay and etc
-y1 = model(x)
-model.eval() # disable dropout, fix batch norm weights and etc
-y2 = model(x)
-```
-
-2) use TensorLayer 1.x method, input `is_train` to the model while inferencing.
-
-```python
-y1 = model(x, is_train=True)
-y2 = model(x, is_train=False)
-```
-
-
-
-### Data augmentation
-
-- Data augmentation is essential for training, while if the augmentation is complex, it will slow down the training.
-We used CIFAR10 classification as example of data augmentation.
-- For the best performance, please use `tutorial_cifar10_datasetapi.py`.
-- It is suggested to use TensorFlow's DataSet API (`tf.data` and `tf.image`) and TFRecord for the sake of performance and generalibity.
-- For TFRecord and Dataset API,
-TFRecord needs to first store all data into TFRecord format, while Dataset API is simpler that can directly use data XXXX.
-
-### Float16
-- For Float16, some GPUs can speed up but some cannot.
-
-### Others
-- For distributed training
\ No newline at end of file
+
diff --git a/tensorlayer/initializers.py b/tensorlayer/initializers.py
index 666777824..aaf4f37ac 100644
--- a/tensorlayer/initializers.py
+++ b/tensorlayer/initializers.py
@@ -192,27 +192,6 @@ def deconv2d_bilinear_upsampling_initializer(shape):
         A constant initializer with weights set to correspond to per channel bilinear upsampling
         when passed as W_int in DeConv2dLayer
 
-    Examples
-    --------
-    Upsampling by a factor of 2, ie e.g 100->200
-    >>> import tensorflow as tf
-    >>> import tensorlayer as tl
-    >>> rescale_factor = 2
-    >>> imsize = 128
-    >>> num_channels = 3
-    >>> num_in_channels = 3
-    >>> num_out_channels = 3
-    >>> filter_shape = (5, 5, num_out_channels, num_in_channels)
-    >>> ni = tl.layers.Input(shape=(1, imsize, imsize, num_channels))
-    >>> bilinear_init = deconv2d_bilinear_upsampling_initializer(shape=filter_shape)
-    >>> net = tl.layers.DeConv2dLayer(
-    ...     shape=filter_shape,
-    ...     outputs_shape=(1, imsize*rescale_factor, imsize*rescale_factor, num_out_channels),
-    ...     strides=(1, rescale_factor, rescale_factor, 1),
-    ...     W_init=bilinear_init,
-    ...     padding='SAME',
-    ...     act=None, name='g/h1/decon2d')(ni)
-
     """
     if shape[0] != shape[1]:
         raise Exception('deconv2d_bilinear_upsampling_initializer only supports symmetrical filter sizes')
diff --git a/tensorlayer/package_info.py b/tensorlayer/package_info.py
index 4e8f417cd..c9cced2ea 100644
--- a/tensorlayer/package_info.py
+++ b/tensorlayer/package_info.py
@@ -4,7 +4,7 @@
 MAJOR = 2
 MINOR = 0
-PATCH = 0
+PATCH = 1
 PRE_RELEASE = ''
 
 # Use the following formatting: (major, minor, patch, prerelease)
 VERSION = (MAJOR, MINOR, PATCH, PRE_RELEASE)