
Commit d35ebdf

Make the train program only return avg_cost for now
1 parent 918537b commit d35ebdf

File tree

2 files changed: +60 -58 lines
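
In both test programs, train_program now returns only avg_cost; the accuracy metric is commented out along with the tuple return. Below is a minimal sketch of the resulting function, reconstructed from the diffs that follow; the img/label data layers and the one-layer inference_program are illustrative stand-ins, not the exact code in these tests.

import paddle.fluid as fluid


def inference_program():
    # Illustrative stand-in; the real tests build a conv net / MLP here.
    img = fluid.layers.data(name='img', shape=[784], dtype='float32')
    return fluid.layers.fc(input=img, size=10, act='softmax')


def train_program():
    label = fluid.layers.data(name='label', shape=[1], dtype='int64')
    predict = inference_program()
    cost = fluid.layers.cross_entropy(input=predict, label=label)
    avg_cost = fluid.layers.mean(cost)
    # Accuracy is disabled for now, so only the average cost is returned.
    # acc = fluid.layers.accuracy(input=predict, label=label)
    # return avg_cost, acc
    return avg_cost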

python/paddle/fluid/tests/book/high-level-api/recognize_digits/test_recognize_digits_conv.py

Lines changed: 30 additions & 29 deletions
@@ -53,8 +53,9 @@ def train_program():
     predict = inference_program()
     cost = fluid.layers.cross_entropy(input=predict, label=label)
     avg_cost = fluid.layers.mean(cost)
-    acc = fluid.layers.accuracy(input=predict, label=label)
-    return avg_cost, acc
+    # acc = fluid.layers.accuracy(input=predict, label=label)
+    # return avg_cost, acc
+    return avg_cost
 
 
 def train(use_cuda, save_dirname):
@@ -65,38 +66,38 @@ def train(use_cuda, save_dirname):
 
     def event_handler(event):
         if isinstance(event, fluid.EndEpochEvent):
-            if (event.epoch + 1) % 10 == 0:
-                trainer.save_params(save_dirname)
-
-                # TODO: Uncomment this part once we are sure that .train is working
-                # test_reader = paddle.batch(
-                #     paddle.dataset.mnist.test(), batch_size=BATCH_SIZE)
-                # test_metrics = trainer.test(reader=test_reader)
-                # avg_cost_set = test_metrics[0]
-                # acc_set = test_metrics[1]
-                #
-                # # get test acc and loss
-                # acc = numpy.array(acc_set).mean()
-                # avg_cost = numpy.array(avg_cost_set).mean()
-                #
-                # print("avg_cost: %s" % avg_cost)
-                # print("acc : %s" % acc)
-                #
-                # if float(acc) > 0.2:  # Smaller value to increase CI speed
-                #     trainer.save_params(save_dirname)
-                # else:
-                #     print('BatchID {0}, Test Loss {1:0.2}, Acc {2:0.2}'.format(
-                #         event.epoch + 1, float(avg_cost), float(acc)))
-                #     if math.isnan(float(avg_cost)):
-                #         sys.exit("got NaN loss, training failed.")
+            # if (event.epoch + 1) % 10 == 0:
+            trainer.save_params(save_dirname)
+
+            # TODO: Uncomment this part once we are sure that .train is working
+            # test_reader = paddle.batch(
+            #     paddle.dataset.mnist.test(), batch_size=BATCH_SIZE)
+            # test_metrics = trainer.test(reader=test_reader)
+            # avg_cost_set = test_metrics[0]
+            # acc_set = test_metrics[1]
+            #
+            # # get test acc and loss
+            # acc = numpy.array(acc_set).mean()
+            # avg_cost = numpy.array(avg_cost_set).mean()
+            #
+            # print("avg_cost: %s" % avg_cost)
+            # print("acc : %s" % acc)
+            #
+            # if float(acc) > 0.2:  # Smaller value to increase CI speed
+            #     trainer.save_params(save_dirname)
+            # else:
+            #     print('BatchID {0}, Test Loss {1:0.2}, Acc {2:0.2}'.format(
+            #         event.epoch + 1, float(avg_cost), float(acc)))
+            #     if math.isnan(float(avg_cost)):
+            #         sys.exit("got NaN loss, training failed.")
 
     train_reader = paddle.batch(
         paddle.reader.shuffle(
             paddle.dataset.mnist.train(), buf_size=500),
         batch_size=BATCH_SIZE)
 
     trainer.train(
-        num_epochs=100,
+        num_epochs=1,
         event_handler=event_handler,
         reader=train_reader,
         feed_order=['img', 'label'])
@@ -126,5 +127,5 @@ def main(use_cuda):
 
 
 if __name__ == '__main__':
-    for use_cuda in (False, True):
-        main(use_cuda=use_cuda)
+    # for use_cuda in (False, True):
+    main(use_cuda=False)
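
For reference, the simplified per-epoch handler and training call read as follows after this change, assembled from the hunks above. Here run_training is a hypothetical wrapper for illustration; trainer is the fluid.Trainer built earlier in the test (its construction is outside this diff), and BATCH_SIZE is the test's batch-size constant. The MLP test below is changed in exactly the same way.

import paddle
import paddle.fluid as fluid

BATCH_SIZE = 64  # assumed value for illustration


def run_training(trainer, save_dirname):
    # Hypothetical wrapper around code that lives inline in the test's train().
    def event_handler(event):
        if isinstance(event, fluid.EndEpochEvent):
            # The (event.epoch + 1) % 10 check is commented out, so params
            # are saved at the end of every epoch.
            trainer.save_params(save_dirname)

    train_reader = paddle.batch(
        paddle.reader.shuffle(
            paddle.dataset.mnist.train(), buf_size=500),
        batch_size=BATCH_SIZE)

    trainer.train(
        num_epochs=1,  # reduced from 100 in this commit
        event_handler=event_handler,
        reader=train_reader,
        feed_order=['img', 'label'])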

python/paddle/fluid/tests/book/high-level-api/recognize_digits/test_recognize_digits_mlp.py

Lines changed: 30 additions & 29 deletions
@@ -40,8 +40,9 @@ def train_program():
     predict = inference_program()
     cost = fluid.layers.cross_entropy(input=predict, label=label)
     avg_cost = fluid.layers.mean(cost)
-    acc = fluid.layers.accuracy(input=predict, label=label)
-    return avg_cost, acc
+    # acc = fluid.layers.accuracy(input=predict, label=label)
+    # return avg_cost, acc
+    return avg_cost
 
 
 def train(use_cuda, save_dirname):
@@ -52,38 +53,38 @@ def train(use_cuda, save_dirname):
 
     def event_handler(event):
         if isinstance(event, fluid.EndEpochEvent):
-            if (event.epoch + 1) % 10 == 0:
-                trainer.save_params(save_dirname)
-
-                # TODO: Uncomment this part once we are sure that .train is working
-                # test_reader = paddle.batch(
-                #     paddle.dataset.mnist.test(), batch_size=BATCH_SIZE)
-                # test_metrics = trainer.test(reader=test_reader)
-                # avg_cost_set = test_metrics[0]
-                # acc_set = test_metrics[1]
-                #
-                # # get test acc and loss
-                # acc = numpy.array(acc_set).mean()
-                # avg_cost = numpy.array(avg_cost_set).mean()
-                #
-                # print("avg_cost: %s" % avg_cost)
-                # print("acc : %s" % acc)
-                #
-                # if float(acc) > 0.2:  # Smaller value to increase CI speed
-                #     trainer.save_params(save_dirname)
-                # else:
-                #     print('BatchID {0}, Test Loss {1:0.2}, Acc {2:0.2}'.format(
-                #         event.epoch + 1, float(avg_cost), float(acc)))
-                #     if math.isnan(float(avg_cost)):
-                #         sys.exit("got NaN loss, training failed.")
+            # if (event.epoch + 1) % 10 == 0:
+            trainer.save_params(save_dirname)
+
+            # TODO: Uncomment this part once we are sure that .train is working
+            # test_reader = paddle.batch(
+            #     paddle.dataset.mnist.test(), batch_size=BATCH_SIZE)
+            # test_metrics = trainer.test(reader=test_reader)
+            # avg_cost_set = test_metrics[0]
+            # acc_set = test_metrics[1]
+            #
+            # # get test acc and loss
+            # acc = numpy.array(acc_set).mean()
+            # avg_cost = numpy.array(avg_cost_set).mean()
+            #
+            # print("avg_cost: %s" % avg_cost)
+            # print("acc : %s" % acc)
+            #
+            # if float(acc) > 0.2:  # Smaller value to increase CI speed
+            #     trainer.save_params(save_dirname)
+            # else:
+            #     print('BatchID {0}, Test Loss {1:0.2}, Acc {2:0.2}'.format(
+            #         event.epoch + 1, float(avg_cost), float(acc)))
+            #     if math.isnan(float(avg_cost)):
+            #         sys.exit("got NaN loss, training failed.")
 
     train_reader = paddle.batch(
         paddle.reader.shuffle(
             paddle.dataset.mnist.train(), buf_size=500),
         batch_size=BATCH_SIZE)
 
     trainer.train(
-        num_epochs=100,
+        num_epochs=1,
         event_handler=event_handler,
         reader=train_reader,
         feed_order=['img', 'label'])
@@ -113,5 +114,5 @@ def main(use_cuda):
 
 
 if __name__ == '__main__':
-    for use_cuda in (False, True):
-        main(use_cuda=use_cuda)
+    # for use_cuda in (False, True):
+    main(use_cuda=False)
