import tensorflow as tf
import tensorlayer as tl

# TF1-style logging control no longer exists in TF2; kept for reference only.
# tf.logging.set_verbosity(tf.logging.DEBUG)
tl.logging.set_verbosity(tl.logging.DEBUG)

# Connect to the experiment database (MongoDB behind tl.db.TensorHub).
db = tl.db.TensorHub(ip='localhost', port=27017, dbname='temp', project_name='tutorial')

# Load the MNIST dataset previously stored in the database
# (returns train / validation / test splits).
X_train, y_train, X_val, y_val, X_test, y_test = db.find_top_dataset('mnist')
# define the network
def mlp(n_units1=None, n_units2=None):
    """Build a dropout MLP classifier: 784 -> n_units1 -> n_units2 -> 10.

    Parameters
    ----------
    n_units1 : int, optional
        Width of the first hidden layer. Defaults to the module-level
        ``n_units1`` hyper-parameter.
    n_units2 : int, optional
        Width of the second hidden layer. Defaults to the module-level
        ``n_units2`` hyper-parameter.

    Returns
    -------
    tl.models.Model
        An un-trained model taking a flat (None, 784) input.

    NOTE(review): the module-level ``n_units1`` / ``n_units2`` are not defined
    in this file — presumably injected by the database task runner; confirm
    before running the script standalone.
    """
    if n_units1 is None:
        n_units1 = globals()['n_units1']  # fall back to injected hyper-parameter
    if n_units2 is None:
        n_units2 = globals()['n_units2']  # fall back to injected hyper-parameter
    ni = tl.layers.Input([None, 784], name='input')
    nn = tl.layers.Dropout(keep=0.8, name='drop1')(ni)
    nn = tl.layers.Dense(n_units=n_units1, act=tf.nn.relu, name='relu1')(nn)
    nn = tl.layers.Dropout(keep=0.5, name='drop2')(nn)
    nn = tl.layers.Dense(n_units=n_units2, act=tf.nn.relu, name='relu2')(nn)
    nn = tl.layers.Dropout(keep=0.5, name='drop3')(nn)
    nn = tl.layers.Dense(n_units=10, act=None, name='output')(nn)
    return tl.models.Model(inputs=ni, outputs=nn)


network = mlp()
28+
29+ # cost and accuracy
30+ cost = tl .cost .cross_entropy
31+
32+ def acc (y , y_ ):
33+ correct_prediction = tf .equal (tf .argmax (y , 1 ), tf .convert_to_tensor (y_ , tf .int64 ))
34+ return tf .reduce_mean (tf .cast (correct_prediction , tf .float32 ))
# define the optimizer
train_op = tf.optimizers.Adam(learning_rate=0.0001)

# train the network
# NOTE: reuse the single optimizer and the ``cost`` alias defined above —
# the original built a second, redundant Adam instance inline here.
tl.utils.fit(
    network, train_op=train_op, cost=cost, X_train=X_train, y_train=y_train,
    acc=acc, batch_size=256, n_epoch=20, X_val=X_val, y_val=y_val,
    eval_train=False,
)
# evaluation and save result that match the result_key
test_accuracy = tl.utils.test(network, acc, X_test, y_test, batch_size=None, cost=cost)
test_accuracy = float(test_accuracy)

# save model into database, keyed by the hyper-parameter choice
# NOTE(review): n_units1 / n_units2 are not defined in this file — presumably
# injected by the database task runner; confirm before running standalone.
db.save_model(network, model_name='mlp', name=str(n_units1) + '-' + str(n_units2), test_accuracy=test_accuracy)
# in another script, you can load the model as follows:
# net = db.find_model(model_name=str(n_units1) + '-' + str(n_units2))