@@ -53,8 +53,9 @@ def train_program():
     predict = inference_program()
     cost = fluid.layers.cross_entropy(input=predict, label=label)
     avg_cost = fluid.layers.mean(cost)
-    acc = fluid.layers.accuracy(input=predict, label=label)
-    return avg_cost, acc
+    # acc = fluid.layers.accuracy(input=predict, label=label)
+    # return avg_cost, acc
+    return avg_cost


 def train(use_cuda, save_dirname):
@@ -65,38 +66,38 @@ def train(use_cuda, save_dirname):

     def event_handler(event):
         if isinstance(event, fluid.EndEpochEvent):
-            if (event.epoch + 1) % 10 == 0:
-                trainer.save_params(save_dirname)
-
-            # TODO: Uncomment this part once we are sure that .train is working
-            # test_reader = paddle.batch(
-            #     paddle.dataset.mnist.test(), batch_size=BATCH_SIZE)
-            # test_metrics = trainer.test(reader=test_reader)
-            # avg_cost_set = test_metrics[0]
-            # acc_set = test_metrics[1]
-            #
-            # # get test acc and loss
-            # acc = numpy.array(acc_set).mean()
-            # avg_cost = numpy.array(avg_cost_set).mean()
-            #
-            # print("avg_cost: %s" % avg_cost)
-            # print("acc : %s" % acc)
-            #
-            # if float(acc) > 0.2:  # Smaller value to increase CI speed
-            #     trainer.save_params(save_dirname)
-            # else:
-            #     print('BatchID {0}, Test Loss {1:0.2}, Acc {2:0.2}'.format(
-            #         event.epoch + 1, float(avg_cost), float(acc)))
-            #     if math.isnan(float(avg_cost)):
-            #         sys.exit("got NaN loss, training failed.")
+            # if (event.epoch + 1) % 10 == 0:
+            trainer.save_params(save_dirname)
+
+            # TODO: Uncomment this part once we are sure that .train is working
+            # test_reader = paddle.batch(
+            #     paddle.dataset.mnist.test(), batch_size=BATCH_SIZE)
+            # test_metrics = trainer.test(reader=test_reader)
+            # avg_cost_set = test_metrics[0]
+            # acc_set = test_metrics[1]
+            #
+            # # get test acc and loss
+            # acc = numpy.array(acc_set).mean()
+            # avg_cost = numpy.array(avg_cost_set).mean()
+            #
+            # print("avg_cost: %s" % avg_cost)
+            # print("acc : %s" % acc)
+            #
+            # if float(acc) > 0.2:  # Smaller value to increase CI speed
+            #     trainer.save_params(save_dirname)
+            # else:
+            #     print('BatchID {0}, Test Loss {1:0.2}, Acc {2:0.2}'.format(
+            #         event.epoch + 1, float(avg_cost), float(acc)))
+            #     if math.isnan(float(avg_cost)):
+            #         sys.exit("got NaN loss, training failed.")

     train_reader = paddle.batch(
         paddle.reader.shuffle(
             paddle.dataset.mnist.train(), buf_size=500),
         batch_size=BATCH_SIZE)

     trainer.train(
-        num_epochs=100,
+        num_epochs=1,
         event_handler=event_handler,
         reader=train_reader,
         feed_order=['img', 'label'])
@@ -126,5 +127,5 @@ def main(use_cuda):


 if __name__ == '__main__':
-    for use_cuda in (False, True):
-        main(use_cuda=use_cuda)
+    # for use_cuda in (False, True):
+    main(use_cuda=False)