
Commit f13843f

Turn on the tests for MNIST
1 parent 08b7ed3 commit f13843f

File tree: 2 files changed, +243 −0 lines changed
File 1 of 2: 128 additions & 0 deletions
@@ -0,0 +1,128 @@
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import paddle.fluid as fluid
import paddle
import sys
import numpy
import math

BATCH_SIZE = 64


def inference_program():
    # LeNet-style network: two conv+pool blocks, then a softmax classifier.
    img = fluid.layers.data(name='img', shape=[1, 28, 28], dtype='float32')

    conv_pool_1 = fluid.nets.simple_img_conv_pool(
        input=img,
        filter_size=5,
        num_filters=20,
        pool_size=2,
        pool_stride=2,
        act="relu")
    conv_pool_1 = fluid.layers.batch_norm(conv_pool_1)
    conv_pool_2 = fluid.nets.simple_img_conv_pool(
        input=conv_pool_1,
        filter_size=5,
        num_filters=50,
        pool_size=2,
        pool_stride=2,
        act="relu")
    prediction = fluid.layers.fc(input=conv_pool_2, size=10, act='softmax')
    return prediction


def train_program():
    label = fluid.layers.data(name='label', shape=[1], dtype='int64')

    predict = inference_program()
    cost = fluid.layers.cross_entropy(input=predict, label=label)
    avg_cost = fluid.layers.mean(cost)
    acc = fluid.layers.accuracy(input=predict, label=label)
    # Return both metrics: the event handler below reads test_metrics[0]
    # (cost) and test_metrics[1] (accuracy) out of trainer.test().
    return [avg_cost, acc]


def train(use_cuda, save_dirname):
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    optimizer = fluid.optimizer.Adam(learning_rate=0.001)

    trainer = fluid.Trainer(train_program, place=place, optimizer=optimizer)

    def event_handler(event):
        if isinstance(event, fluid.EndEpochEvent):
            if (event.epoch + 1) % 10 == 0:
                test_reader = paddle.batch(
                    paddle.dataset.mnist.test(), batch_size=BATCH_SIZE)
                test_metrics = trainer.test(reader=test_reader)
                avg_cost_set = test_metrics[0]
                acc_set = test_metrics[1]

                # Mean test accuracy and loss over all test batches.
                acc = numpy.array(acc_set).mean()
                avg_cost = numpy.array(avg_cost_set).mean()

                print("avg_cost: %s" % avg_cost)
                print("acc     : %s" % acc)

                if float(acc) > 0.2:  # Low threshold to keep CI fast.
                    trainer.save_params(save_dirname)
                else:
                    print('Epoch {0}, Test Loss {1:0.2}, Acc {2:0.2}'.format(
                        event.epoch + 1, float(avg_cost), float(acc)))
                    if math.isnan(float(avg_cost)):
                        sys.exit("got NaN loss, training failed.")

    train_reader = paddle.batch(
        paddle.reader.shuffle(
            paddle.dataset.mnist.train(), buf_size=500),
        batch_size=BATCH_SIZE)

    trainer.train(
        num_epochs=100,
        event_handler=event_handler,
        reader=train_reader,
        feed_order=['img', 'label'])


def infer(use_cuda, save_dirname=None):
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()

    inferencer = fluid.Inferencer(
        inference_program, param_path=save_dirname, place=place)

    # Random noise is enough to smoke-test the inference path; a real
    # prediction would feed an actual MNIST image here.
    batch_size = 1
    tensor_img = numpy.random.uniform(
        -1.0, 1.0, [batch_size, 1, 28, 28]).astype("float32")

    results = inferencer.infer({'img': tensor_img})

    print("infer results: ", results[0])


def main(use_cuda):
    save_dirname = "recognize_digits_conv.inference.model"

    train(use_cuda=use_cuda, save_dirname=save_dirname)
    infer(use_cuda=use_cuda, save_dirname=save_dirname)


if __name__ == '__main__':
    for use_cuda in (False, True):
        main(use_cuda=use_cuda)
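As a quick sanity check on the conv stack above (my own arithmetic, not part of the commit, and assuming conv2d's default of no padding inside simple_img_conv_pool): each block applies an unpadded 5x5 convolution and then 2x2 max pooling with stride 2, so the spatial size goes 28 to 12 to 4.

def conv_pool_out(size, filter_size=5, pool_size=2, pool_stride=2):
    conv = size - filter_size + 1                 # unpadded convolution
    return (conv - pool_size) // pool_stride + 1  # max pool

s1 = conv_pool_out(28)  # 12 (20 channels after conv_pool_1)
s2 = conv_pool_out(s1)  # 4  (50 channels after conv_pool_2)
print(50 * s2 * s2)     # 800 inputs flattened into the final fc layer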
File 2 of 2: 115 additions & 0 deletions
@@ -0,0 +1,115 @@
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import paddle.fluid as fluid
import paddle
import sys
import numpy
import math

BATCH_SIZE = 64


def inference_program():
    # A simple MLP: fc auto-flattens the 1x28x28 image to 784 inputs.
    img = fluid.layers.data(name='img', shape=[1, 28, 28], dtype='float32')

    hidden = fluid.layers.fc(input=img, size=200, act='tanh')
    hidden = fluid.layers.fc(input=hidden, size=200, act='tanh')
    prediction = fluid.layers.fc(input=hidden, size=10, act='softmax')
    return prediction


def train_program():
    label = fluid.layers.data(name='label', shape=[1], dtype='int64')

    predict = inference_program()
    cost = fluid.layers.cross_entropy(input=predict, label=label)
    avg_cost = fluid.layers.mean(cost)
    acc = fluid.layers.accuracy(input=predict, label=label)
    # Return both metrics: the event handler below reads test_metrics[0]
    # (cost) and test_metrics[1] (accuracy) out of trainer.test().
    return [avg_cost, acc]


def train(use_cuda, save_dirname):
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    optimizer = fluid.optimizer.Adam(learning_rate=0.001)

    trainer = fluid.Trainer(train_program, place=place, optimizer=optimizer)

    def event_handler(event):
        if isinstance(event, fluid.EndEpochEvent):
            if (event.epoch + 1) % 10 == 0:
                test_reader = paddle.batch(
                    paddle.dataset.mnist.test(), batch_size=BATCH_SIZE)
                test_metrics = trainer.test(reader=test_reader)
                avg_cost_set = test_metrics[0]
                acc_set = test_metrics[1]

                # Mean test accuracy and loss over all test batches.
                acc = numpy.array(acc_set).mean()
                avg_cost = numpy.array(avg_cost_set).mean()

                print("avg_cost: %s" % avg_cost)
                print("acc     : %s" % acc)

                if float(acc) > 0.2:  # Low threshold to keep CI fast.
                    trainer.save_params(save_dirname)
                else:
                    print('Epoch {0}, Test Loss {1:0.2}, Acc {2:0.2}'.format(
                        event.epoch + 1, float(avg_cost), float(acc)))
                    if math.isnan(float(avg_cost)):
                        sys.exit("got NaN loss, training failed.")

    train_reader = paddle.batch(
        paddle.reader.shuffle(
            paddle.dataset.mnist.train(), buf_size=500),
        batch_size=BATCH_SIZE)

    trainer.train(
        num_epochs=100,
        event_handler=event_handler,
        reader=train_reader,
        feed_order=['img', 'label'])


def infer(use_cuda, save_dirname=None):
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()

    inferencer = fluid.Inferencer(
        inference_program, param_path=save_dirname, place=place)

    # Random noise is enough to smoke-test the inference path; a real
    # prediction would feed an actual MNIST image here.
    batch_size = 1
    tensor_img = numpy.random.uniform(
        -1.0, 1.0, [batch_size, 1, 28, 28]).astype("float32")

    results = inferencer.infer({'img': tensor_img})

    print("infer results: ", results[0])


def main(use_cuda):
    save_dirname = "recognize_digits_mlp.inference.model"

    train(use_cuda=use_cuda, save_dirname=save_dirname)
    infer(use_cuda=use_cuda, save_dirname=save_dirname)


if __name__ == '__main__':
    for use_cuda in (False, True):
        main(use_cuda=use_cuda)
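A back-of-the-envelope parameter count for the MLP above (my own arithmetic, not part of the commit; each fc layer holds an n_in x n_out weight matrix plus one bias per output):

layers = [(784, 200), (200, 200), (200, 10)]  # 28*28 = 784 flattened inputs
total = sum(n_in * n_out + n_out for n_in, n_out in layers)
print(total)  # 199210 trainable parameters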

0 commit comments