Add version_info in model.config. #992


Merged: 47 commits, Jun 7, 2019

Commits (47)
f51838f
Merge branch 'master' of https://github.com/tensorlayer/tensorlayer
warshallrho May 12, 2019
341cf1c
(non)trainable weights, layer all_layers
warshallrho May 12, 2019
58edaed
weights -> all_weights
warshallrho May 12, 2019
647d953
Merge branch 'master' of https://github.com/tensorlayer/tensorlayer
warshallrho May 12, 2019
ad973fd
weights -> all_weights, trainable weights, nontrainable_weights
warshallrho May 12, 2019
0af6056
fix bugs, yapf
warshallrho May 13, 2019
37bb705
fix bugs
warshallrho May 13, 2019
be27eb2
fix bugs
warshallrho May 13, 2019
760b219
fix bugs
warshallrho May 13, 2019
4024ace
Merge branch 'master' of https://github.com/tensorlayer/tensorlayer
warshallrho May 20, 2019
cc1feb7
alpha version, update network config
warshallrho May 25, 2019
372aa6b
fix bug
warshallrho May 25, 2019
1ad5b82
Merge branch 'master' of https://github.com/tensorlayer/tensorlayer
warshallrho May 25, 2019
e79b8d6
add files
warshallrho May 25, 2019
74c6ada
Update CHANGELOG.md
warshallrho May 25, 2019
0866b56
fix bugs
warshallrho May 25, 2019
a795f72
yapf
warshallrho May 25, 2019
03e990c
update act in base layer and related layers
warshallrho May 25, 2019
0261521
fix bugs
warshallrho May 25, 2019
5549234
fix bug
warshallrho May 25, 2019
0f1ae82
fix bugs
warshallrho May 25, 2019
c415b79
Merge branch 'master' into master
zsdonghao May 29, 2019
daef3c3
Merge branch 'master' of https://github.com/tensorlayer/tensorlayer
warshallrho May 29, 2019
65ee5a6
parse float in lrelu
warshallrho May 29, 2019
39ba8b7
Merge branch 'master' of https://github.com/warshallrho/tensorlayer2
warshallrho May 29, 2019
d7b8e7d
yapf
warshallrho May 29, 2019
d9c3b53
Merge branch 'master' into master
zsdonghao Jun 2, 2019
969b4ee
Merge branch 'master' of https://github.com/tensorlayer/tensorlayer
warshallrho Jun 6, 2019
9adae3f
add version_info into model.config
warshallrho Jun 6, 2019
c4d8d0f
Merge branch 'master' of https://github.com/warshallrho/tensorlayer2
warshallrho Jun 6, 2019
4a6dab3
changelog
warshallrho Jun 6, 2019
f8dfe46
Update CHANGELOG.md
warshallrho Jun 6, 2019
4bd6b8f
Update CHANGELOG.md
warshallrho Jun 6, 2019
fc6b9a2
remove save_time in config in unittest, to assert old_config == new_c…
warshallrho Jun 6, 2019
0a73a32
fix bugs
warshallrho Jun 6, 2019
ce346b4
yapf
warshallrho Jun 6, 2019
1c7fafa
Update recurrent.py
zsdonghao Jun 6, 2019
7ef8d00
Update recurrent.py
zsdonghao Jun 6, 2019
9b4fc54
remove blank line
warshallrho Jun 6, 2019
229ecff
Merge branch 'master' of https://github.com/warshallrho/tensorlayer2
warshallrho Jun 6, 2019
6bb1745
Update test_model_save_graph.py
warshallrho Jun 6, 2019
95f4da1
Update recurrent.py
zsdonghao Jun 6, 2019
4680885
Update recurrent.py (trial)
warshallrho Jun 6, 2019
1518f61
update recurrent.py(trial)
warshallrho Jun 6, 2019
cbfc51b
update recurrent.py(trial)
warshallrho Jun 6, 2019
a633dcb
Update recurrent.py
warshallrho Jun 6, 2019
eaed1ce
Update recurrent.py
warshallrho Jun 6, 2019
CHANGELOG.md: 10 additions & 0 deletions

@@ -85,6 +85,16 @@ To release a new version, please update the changelog as followed:

### Contributors

## [2.0.3]

### Changed
- Add version_info in model.config.

### Fixed

### Contributors
- @warshallrho:

## [2.0.2] - 2019-6-5

### Changed
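
For orientation: the net effect of this PR is that version_info becomes a regular key of model.config instead of a standalone HDF5 attribute. A minimal sketch of the resulting structure, with the layer/architecture keys elided (values depend on the installed packages):

import tensorflow as tf
import tensorlayer as tl

# Sketch of model.config after this change; non-version keys elided.
config = {
    "name": "model",
    "version_info": {
        "tensorlayer_version": tl.__version__,  # e.g. "2.0.3"
        "backend": "tensorflow",
        "backend_version": tf.__version__,
        "training_device": "gpu",
        "save_date": None,  # stamped by save_hdf5_graph at save time
    },
}
print(config["version_info"])
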
tensorlayer/files/utils.py: 17 additions & 14 deletions

@@ -181,21 +181,23 @@ def save_hdf5_graph(network, filepath='model.hdf5', save_weights=False, customiz
logging.info("[*] Saving TL model into {}, saving weights={}".format(filepath, save_weights))

model_config = network.config # net2static_graph(network)
model_config["version_info"]["save_date"] = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc
).isoformat()
model_config_str = str(model_config)
customized_data_str = str(customized_data)
version_info = {
"tensorlayer_version": tl.__version__,
"backend": "tensorflow",
"backend_version": tf.__version__,
"training_device": "gpu",
"save_date": datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat()
}
version_info_str = str(version_info)
# version_info = {
# "tensorlayer_version": tl.__version__,
# "backend": "tensorflow",
# "backend_version": tf.__version__,
# "training_device": "gpu",
# "save_date": datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat()
# }
# version_info_str = str(version_info)

with h5py.File(filepath, 'w') as f:
f.attrs["model_config"] = model_config_str.encode('utf8')
f.attrs["customized_data"] = customized_data_str.encode('utf8')
f.attrs["version_info"] = version_info_str.encode('utf8')
# f.attrs["version_info"] = version_info_str.encode('utf8')
if save_weights:
_save_weights_to_hdf5_group(f, network.all_layers)
f.flush()
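
In short, save_hdf5_graph now stamps save_date into the config's version_info and writes the whole config as a single attribute. A condensed sketch of that path, assuming h5py is installed (weight saving and customized_data omitted):

import datetime
import h5py

def save_config_only(network, filepath="model.hdf5"):
    # network.config already carries "version_info" (see models/core.py below).
    model_config = network.config
    # Stamp the save time in UTC, ISO 8601, as the diff above does.
    model_config["version_info"]["save_date"] = datetime.datetime.utcnow().replace(
        tzinfo=datetime.timezone.utc
    ).isoformat()
    with h5py.File(filepath, "w") as f:
        # One attribute now holds the config, version_info included.
        f.attrs["model_config"] = str(model_config).encode("utf8")
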
@@ -315,8 +317,12 @@ def load_hdf5_graph(filepath='model.hdf5', load_weights=False):

f = h5py.File(filepath, 'r')

version_info_str = f.attrs["version_info"].decode('utf8')
version_info = eval(version_info_str)
model_config_str = f.attrs["model_config"].decode('utf8')
model_config = eval(model_config_str)

# version_info_str = f.attrs["version_info"].decode('utf8')
# version_info = eval(version_info_str)
version_info = model_config["version_info"]
backend_version = version_info["backend_version"]
tensorlayer_version = version_info["tensorlayer_version"]
if backend_version != tf.__version__:
@@ -332,9 +338,6 @@
)
)

model_config_str = f.attrs["model_config"].decode('utf8')
model_config = eval(model_config_str)

M = static_graph2net(model_config)
if load_weights:
if not ('layer_names' in f.attrs.keys()):
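
Correspondingly, the loader now recovers version_info from model_config rather than from a dedicated attribute. A condensed sketch of the read-and-check step (graph reconstruction and weight loading omitted; eval mirrors the library's own deserialization here, so only open files you trust):

import h5py
import tensorflow as tf

def read_version_info(filepath="model.hdf5"):
    with h5py.File(filepath, "r") as f:
        model_config = eval(f.attrs["model_config"].decode("utf8"))
    version_info = model_config["version_info"]  # no separate attribute anymore
    if version_info["backend_version"] != tf.__version__:
        print(
            "Warning: saved with TensorFlow %s, loading under %s"
            % (version_info["backend_version"], tf.__version__)
        )
    return version_info
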
tensorlayer/layers/recurrent.py: 17 additions & 2 deletions

@@ -42,24 +42,31 @@ class RNN(Layer):
A RNN cell implemented by tf.keras
- E.g. tf.keras.layers.SimpleRNNCell, tf.keras.layers.LSTMCell, tf.keras.layers.GRUCell
- Note TF2.0+, TF1.0+ and TF1.0- are different

return_last_output : boolean
Whether return last output or all outputs in a sequence.

- If True, return the last output, "Sequence input and single output"
- If False, return all outputs, "Synced sequence input and output"
- In other words, if you want to stack more RNNs on this layer, set to False

In a dynamic model, `return_last_output` can be updated when it is called in customised forward().
By default, `False`.
return_seq_2d : boolean
Only consider this argument when `return_last_output` is `False`

- If True, return 2D Tensor [batch_size * n_steps, n_hidden], for stacking Dense layer after it.
- If False, return 3D Tensor [batch_size, n_steps, n_hidden], for stacking multiple RNN after it.

In a dynamic model, `return_seq_2d` can be updated when it is called in customised forward().
By default, `False`.
return_last_state: boolean
Whether to return the last state of the RNN cell. The state is a list of Tensor.
For simple RNN and GRU, last_state = [last_output]; For LSTM, last_state = [last_output, last_cell_state]

- If True, the layer will return outputs and the final state of the cell.
- If False, the layer will return outputs only.

In a dynamic model, `return_last_state` can be updated when it is called in customised forward().
By default, `False`.
in_channels: int
@@ -74,6 +81,7 @@ class RNN(Layer):
For synced sequence input and output, see `PTB example <https://github.com/tensorlayer/tensorlayer/blob/master/example/tutorial_ptb_lstm.py>`__

A simple regression model below.

>>> inputs = tl.layers.Input([batch_size, num_steps, embedding_size])
>>> rnn_out, lstm_state = tl.layers.RNN(
>>> cell=tf.keras.layers.LSTMCell(units=hidden_size, dropout=0.1),
@@ -85,6 +93,7 @@ class RNN(Layer):
>>> # If LSTMCell is applied, the rnn_state is [h, c] where h the hidden state and c the cell state of LSTM.

A stacked RNN model.

>>> inputs = tl.layers.Input([batch_size, num_steps, embedding_size])
>>> rnn_out1 = tl.layers.RNN(
>>> cell=tf.keras.layers.SimpleRNNCell(units=hidden_size, dropout=0.1),
@@ -101,7 +110,6 @@
-----
Input dimension should be rank 3 : [batch_size, n_steps, n_features]. If not, please see layer :class:`Reshape`.


"""

def __init__(
@@ -237,6 +245,7 @@ class BiRNN(Layer):
Whether to return the last state of the two cells. The state is a list of Tensor.
- If True, the layer will return outputs, the final state of `fw_cell` and the final state of `bw_cell`.
- If False, the layer will return outputs only.

In a dynamic model, `return_last_state` can be updated when it is called in customised forward().
By default, `False`.
in_channels: int
@@ -249,6 +258,7 @@
Examples
--------
A simple regression model below.

>>> inputs = tl.layers.Input([batch_size, num_steps, embedding_size])
>>> # the fw_cell and bw_cell can be different
>>> rnnlayer = tl.layers.BiRNN(
@@ -266,6 +276,7 @@
>>> rnn_model = tl.models.Model(inputs=inputs, outputs=[outputs, rnn_out, rnn_fw_state[0], rnn_bw_state[0]])

A stacked BiRNN model.

>>> inputs = tl.layers.Input([batch_size, num_steps, embedding_size])
>>> rnn_out1 = tl.layers.BiRNN(
>>> fw_cell=tf.keras.layers.SimpleRNNCell(units=hidden_size, dropout=0.1),
@@ -281,7 +292,6 @@
>>> outputs = tl.layers.Reshape([-1, num_steps])(dense)
>>> rnn_model = tl.models.Model(inputs=inputs, outputs=outputs)


Notes
-----
Input dimension should be rank 3 : [batch_size, n_steps, n_features]. If not, please see layer :class:`Reshape`.
@@ -612,10 +622,12 @@ class ConvLSTM(Layer):
- If True, return the last output, "Sequence input and single output".
- If False, return all outputs, "Synced sequence input and output".
- In other words, if you want to stack more RNNs on this layer, set to False.

return_seq_2d : boolean
Only consider this argument when `return_last_output` is `False`
- If True, return 2D Tensor [n_example, n_hidden], for stacking DenseLayer after it.
- If False, return 3D Tensor [n_example/n_steps, n_steps, n_hidden], for stacking multiple RNN after it.

name : str
A unique layer name.

@@ -886,6 +898,7 @@ class Seq2Seq(Layer):
A TensorFlow core RNN cell
- see `RNN Cells in TensorFlow <https://www.tensorflow.org/api_docs/python/>`__
- Note TF1.0+ and TF1.0- are different

cell_init_args : dictionary or None
The arguments for the cell initializer.
n_hidden : int
@@ -903,12 +916,14 @@
dropout : tuple of float or int
The input and output keep probability (input_keep_prob, output_keep_prob).
- If one int, input and output keep probability are the same.

n_layer : int
The number of RNN layers, default is 1.
return_seq_2d : boolean
Only consider this argument when `return_last_output` is `False`
- If True, return 2D Tensor [n_example, 2 * n_hidden], for stacking DenseLayer after it.
- If False, return 3D Tensor [n_example/n_steps, n_steps, 2 * n_hidden], for stacking multiple RNN after it.

name : str
A unique layer name.

tensorlayer/models/core.py: 8 additions & 7 deletions

@@ -443,13 +443,14 @@ def config(self):
_config.update({"name": None})
else:
_config.update({"name": self.name})
# versionInfo = {
# "tensorlayer_version": tl.__version__,
# "backend": "tensorflow",
# "backend_version": tf.__version__,
# "training_device": "gpu",
# }
# _config.update(versionInfo)
version_info = {
"tensorlayer_version": tl.__version__,
"backend": "tensorflow",
"backend_version": tf.__version__,
"training_device": "gpu",
"save_date": None,
}
_config["version_info"] = version_info
# if self.outputs is None:
# raise RuntimeError(
# "Dynamic mode does not support config yet."
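
Because version_info is built inside the config property, it is available before any save, with save_date left as None until save_hdf5_graph stamps it. A hedged usage sketch against the TL 2.x API used elsewhere in this PR:

import tensorflow as tf
import tensorlayer as tl
from tensorlayer.layers import Dense, Input
from tensorlayer.models import Model

ni = Input([None, 16])
nn = Dense(n_units=8, act=tf.nn.relu, name="dense1")(ni)
M = Model(inputs=ni, outputs=nn)

info = M.config["version_info"]
assert info["save_date"] is None           # only saving fills this in
assert info["tensorlayer_version"] == tl.__version__
M.save(filepath="m.hdf5", save_weights=False)  # save_hdf5_graph stamps save_date
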
tests/models/test_model_save_graph.py: 53 additions & 12 deletions

@@ -15,6 +15,11 @@
from tests.utils import CustomTestCase


def RemoveDateInConfig(config):
config["version_info"]["save_date"] = None
return config
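
The RemoveDateInConfig helper exists because save_date is stamped at save time: a config read back from disk carries a real timestamp while the in-memory one may not, so the tests null the field on both sides before comparing. A self-contained illustration of the pattern (dicts abbreviated):

def remove_date(config):  # mirrors RemoveDateInConfig above
    config["version_info"]["save_date"] = None
    return config

c1 = {"name": "m", "version_info": {"save_date": "2019-06-07T12:00:00+00:00"}}
c2 = {"name": "m", "version_info": {"save_date": None}}
assert remove_date(c1) == remove_date(c2)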


def basic_static_model():
ni = Input((None, 24, 24, 3))
nn = Conv2d(16, (5, 5), (1, 1), padding='SAME', act=tf.nn.relu, name="conv1")(ni)
@@ -42,7 +47,10 @@ def test_save(self):
M1.save(filepath="basic_model_without_weights.hdf5", save_weights=False)
M2 = Model.load(filepath="basic_model_without_weights.hdf5", load_weights=False)

self.assertEqual(M1.config, M2.config)
M1_config = RemoveDateInConfig(M1.config)
M2_config = RemoveDateInConfig(M2.config)

self.assertEqual(M1_config, M2_config)


def get_model(inputs_shape):
@@ -188,7 +196,10 @@ def test_save(self):
M1.save(filepath="siamese.hdf5", save_weights=False)
M2 = Model.load(filepath="siamese.hdf5", load_weights=False)

self.assertEqual(M1.config, M2.config)
M1_config = RemoveDateInConfig(M1.config)
M2_config = RemoveDateInConfig(M2.config)

self.assertEqual(M1_config, M2_config)


class Vgg_LayerList_test(CustomTestCase):
@@ -204,7 +215,10 @@ def test_save(self):
M1.save(filepath="vgg.hdf5", save_weights=False)
M2 = Model.load(filepath="vgg.hdf5", load_weights=False)

self.assertEqual(M1.config, M2.config)
M1_config = RemoveDateInConfig(M1.config)
M2_config = RemoveDateInConfig(M2.config)

self.assertEqual(M1_config, M2_config)


class List_inputs_outputs_test(CustomTestCase):
@@ -228,7 +242,10 @@ def test_list_inputs_outputs(self):
M1.save(filepath="list.hdf5", save_weights=False)
M2 = Model.load(filepath="list.hdf5", load_weights=False)

self.assertEqual(M1.config, M2.config)
M1_config = RemoveDateInConfig(M1.config)
M2_config = RemoveDateInConfig(M2.config)

self.assertEqual(M1_config, M2_config)


class Lambda_layer_test(CustomTestCase):
@@ -251,8 +268,11 @@ def test_lambda_layer_no_para_no_args(self):
output1 = M1(npInput).numpy()
output2 = M1(npInput).numpy()

M1_config = RemoveDateInConfig(M1.config)
M2_config = RemoveDateInConfig(M2.config)

self.assertEqual((output1 == output2).all(), True)
self.assertEqual(M1.config, M2.config)
self.assertEqual(M1_config, M2_config)

def test_lambda_layer_no_para_with_args(self):

@@ -272,9 +292,12 @@ def customize_func(x, foo=42): # x is the inputs, foo is an argument
output1 = M1(npInput).numpy()
output2 = M2(npInput).numpy()

M1_config = RemoveDateInConfig(M1.config)
M2_config = RemoveDateInConfig(M2.config)

self.assertEqual((output1 == output2).all(), True)
self.assertEqual((output1 == (np.zeros((8, 3)) + 9)).all(), True)
self.assertEqual(M1.config, M2.config)
self.assertEqual(M1_config, M2_config)

def test_lambda_layer_keras_model(self):
input_shape = [100, 5]
@@ -299,8 +322,11 @@ def test_lambda_layer_keras_model(self):
output2 = M2(npInput).numpy()
output4 = M4(npInput).numpy()

M2_config = RemoveDateInConfig(M2.config)
M4_config = RemoveDateInConfig(M4.config)

self.assertEqual((output2 == output4).all(), True)
self.assertEqual(M2.config, M4.config)
self.assertEqual(M2_config, M4_config)

ori_weights = M4.all_weights
ori_val = ori_weights[1].numpy()
@@ -328,8 +354,11 @@ def test_lambda_layer_keras_layer(self):
output1 = M1(npInput).numpy()
output3 = M3(npInput).numpy()

M1_config = RemoveDateInConfig(M1.config)
M3_config = RemoveDateInConfig(M3.config)

self.assertEqual((output1 == output3).all(), True)
self.assertEqual(M1.config, M3.config)
self.assertEqual(M1_config, M3_config)

ori_weights = M3.all_weights
ori_val = ori_weights[1].numpy()
@@ -365,8 +394,11 @@ def func(noise, mean, std, foo=42):
output1 = M1(ipt).numpy()
output2 = M2(ipt).numpy()

M1_config = RemoveDateInConfig(M1.config)
M2_config = RemoveDateInConfig(M2.config)

self.assertEqual((output1 == output2).all(), True)
self.assertEqual(M1.config, M2.config)
self.assertEqual(M1_config, M2_config)

def test_elementwise_no_para_no_args(self):
# z = mean + noise * tf.exp(std * 0.5) + foo
@@ -387,8 +419,11 @@ def func(noise, mean, std, foo=42):
output1 = M1(ipt).numpy()
output2 = M2(ipt).numpy()

M1_config = RemoveDateInConfig(M1.config)
M2_config = RemoveDateInConfig(M2.config)

self.assertEqual((output1 == output2).all(), True)
self.assertEqual(M1.config, M2.config)
self.assertEqual(M1_config, M2_config)

def test_elementwise_lambda_func(self):
# z = mean + noise * tf.exp(std * 0.5)
@@ -410,8 +445,11 @@ def test_elementwise_lambda_func(self):
output1 = M1(ipt).numpy()
output2 = M2(ipt).numpy()

M1_config = RemoveDateInConfig(M1.config)
M2_config = RemoveDateInConfig(M2.config)

self.assertEqual((output1 == output2).all(), True)
self.assertEqual(M1.config, M2.config)
self.assertEqual(M1_config, M2_config)

# # ElementwiseLambda does not support keras layer/model func yet
# def test_elementwise_keras_model(self):
@@ -434,8 +472,11 @@ def test_elementwise_lambda_func(self):
# output1 = M1(ipt).numpy()
# output2 = M2(ipt).numpy()
#
# M1_config = RemoveDateInConfig(M1.config)
# M2_config = RemoveDateInConfig(M2.config)
#
# self.assertEqual((output1 == output2).all(), True)
# self.assertEqual(M1.config, M2.config)
# self.assertEqual(M1_config, M2_config)


class basic_dynamic_model(Model):