Commit 1aaeb25

Merge pull request #313 from tensorlayer/logging
[WIP] replace all print to logging.info
2 parents: 93dd27f + 39f8411

21 files changed: +181, -175 lines

.gitignore

Lines changed: 9 additions & 11 deletions

@@ -1,14 +1,12 @@
-.idea
-docs/_build
-tensorlayer/__pacache__
-tensorlayer/.DS_Store
+*.gz
+*.npz
+*.pyc
+*~
 .DS_Store
-dist
+.idea
+.spyproject/
 build/
+dist
+docs/_build
 tensorlayer.egg-info
-data/.DS_Store
-*.pyc
-*.gz
-.spyproject/
-*~
-
+tensorlayer/__pacache__

tensorlayer/_logging.py

Lines changed: 7 additions & 0 deletions

@@ -0,0 +1,7 @@
+import logging
+
+logging.basicConfig(level=logging.INFO, format='[TL] %(message)s')
+
+
+def info(fmt, *args):
+    logging.info(fmt, *args)
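For context, the new module simply wraps the standard-library root logger with a fixed "[TL] " prefix. A minimal standalone sketch of the resulting behaviour (the layer name and shape values below are invented for illustration):

    import logging

    logging.basicConfig(level=logging.INFO, format='[TL] %(message)s')


    def info(fmt, *args):
        # Forwards to the root logger; %-style args are formatted by logging.
        logging.info(fmt, *args)


    # Mimics the style of the Conv2dLayer message in the convolution.py diff below:
    info("Conv2dLayer %s: shape:%s strides:%s pad:%s act:%s",
         'cnn1', str([5, 5, 3, 32]), str([1, 1, 1, 1]), 'SAME', 'relu')
    # emits: [TL] Conv2dLayer cnn1: shape:[5, 5, 3, 32] strides:[1, 1, 1, 1] pad:SAME act:relu

Note that the call sites changed in this commit pre-format the message with % before passing it to logging.info, which produces the same INFO-level output.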

tensorlayer/layers/convolution.py

Lines changed: 17 additions & 17 deletions

@@ -54,7 +54,7 @@ def __init__(
     ):
         Layer.__init__(self, name=name)
         self.inputs = layer.outputs
-        print(" [TL] Conv1dLayer %s: shape:%s stride:%s pad:%s act:%s" % (self.name, str(shape), str(stride), padding, act.__name__))
+        logging.info("Conv1dLayer %s: shape:%s stride:%s pad:%s act:%s" % (self.name, str(shape), str(stride), padding, act.__name__))
         if act is None:
             act = tf.identity
         with tf.variable_scope(name) as vs:
@@ -158,7 +158,7 @@ def __init__(
     ):
         Layer.__init__(self, name=name)
         self.inputs = layer.outputs
-        print(" [TL] Conv2dLayer %s: shape:%s strides:%s pad:%s act:%s" % (self.name, str(shape), str(strides), padding, act.__name__))
+        logging.info("Conv2dLayer %s: shape:%s strides:%s pad:%s act:%s" % (self.name, str(shape), str(strides), padding, act.__name__))

         with tf.variable_scope(name) as vs:
             W = tf.get_variable(name='W_conv2d', shape=shape, initializer=W_init, dtype=D_TYPE, **W_init_args)
@@ -267,9 +267,9 @@ def __init__(
     ):
         Layer.__init__(self, name=name)
         self.inputs = layer.outputs
-        print(" [TL] DeConv2dLayer %s: shape:%s out_shape:%s strides:%s pad:%s act:%s" % (self.name, str(shape), str(output_shape), str(strides), padding,
+        logging.info("DeConv2dLayer %s: shape:%s out_shape:%s strides:%s pad:%s act:%s" % (self.name, str(shape), str(output_shape), str(strides), padding,
                                                                                           act.__name__))
-        # print(" DeConv2dLayer: Untested")
+        # logging.info(" DeConv2dLayer: Untested")
         with tf.variable_scope(name) as vs:
             W = tf.get_variable(name='W_deconv2d', shape=shape, initializer=W_init, dtype=D_TYPE, **W_init_args)
             if b_init:
@@ -331,7 +331,7 @@ def __init__(
     ):
         Layer.__init__(self, name=name)
         self.inputs = layer.outputs
-        print(" [TL] Conv3dLayer %s: shape:%s strides:%s pad:%s act:%s" % (self.name, str(shape), str(strides), padding, act.__name__))
+        logging.info("Conv3dLayer %s: shape:%s strides:%s pad:%s act:%s" % (self.name, str(shape), str(strides), padding, act.__name__))

         with tf.variable_scope(name) as vs:
             # W = tf.Variable(W_init(shape=shape, **W_init_args), name='W_conv')
@@ -394,7 +394,7 @@ def __init__(
     ):
         Layer.__init__(self, name=name)
         self.inputs = layer.outputs
-        print(" [TL] DeConv3dLayer %s: shape:%s out_shape:%s strides:%s pad:%s act:%s" % (self.name, str(shape), str(output_shape), str(strides), padding,
+        logging.info("DeConv3dLayer %s: shape:%s out_shape:%s strides:%s pad:%s act:%s" % (self.name, str(shape), str(output_shape), str(strides), padding,
                                                                                           act.__name__))

         with tf.variable_scope(name) as vs:
@@ -452,7 +452,7 @@ def __init__(
             size = [int(size_h), int(size_w)]
         else:
             raise Exception("Donot support shape %s" % self.inputs.get_shape())
-        print(" [TL] UpSampling2dLayer %s: is_scale:%s size:%s method:%d align_corners:%s" % (name, is_scale, size, method, align_corners))
+        logging.info("UpSampling2dLayer %s: is_scale:%s size:%s method:%d align_corners:%s" % (name, is_scale, size, method, align_corners))
         with tf.variable_scope(name) as vs:
             try:
                 self.outputs = tf.image.resize_images(self.inputs, size=size, method=method, align_corners=align_corners)
@@ -507,7 +507,7 @@ def __init__(
             size = [int(size_h), int(size_w)]
         else:
             raise Exception("Donot support shape %s" % self.inputs.get_shape())
-        print(" [TL] DownSampling2dLayer %s: is_scale:%s size:%s method:%d, align_corners:%s" % (name, is_scale, size, method, align_corners))
+        logging.info("DownSampling2dLayer %s: is_scale:%s size:%s method:%d, align_corners:%s" % (name, is_scale, size, method, align_corners))
         with tf.variable_scope(name) as vs:
             try:
                 self.outputs = tf.image.resize_images(self.inputs, size=size, method=method, align_corners=align_corners)
@@ -699,7 +699,7 @@ def __init__(self,
         self.inputs = layer.outputs
         self.offset_layer = offset_layer

-        print(" [TL] DeformableConv2dLayer %s: shape:%s, act:%s" % (self.name, str(shape), act.__name__))
+        logging.info("DeformableConv2dLayer %s: shape:%s, act:%s" % (self.name, str(shape), act.__name__))

         with tf.variable_scope(name) as vs:
             offset = self.offset_layer.outputs
@@ -836,7 +836,7 @@ def __init__(self,
         self.inputs = layer.outputs
         if act is None:
             act = tf.identity
-        print(" [TL] AtrousConv2dLayer %s: n_filter:%d filter_size:%s rate:%d pad:%s act:%s" % (self.name, n_filter, filter_size, rate, padding, act.__name__))
+        logging.info("AtrousConv2dLayer %s: n_filter:%d filter_size:%s rate:%d pad:%s act:%s" % (self.name, n_filter, filter_size, rate, padding, act.__name__))
         with tf.variable_scope(name) as vs:
             shape = [filter_size[0], filter_size[1], int(self.inputs.get_shape()[-1]), n_filter]
             filters = tf.get_variable(name='filter', shape=shape, initializer=W_init, dtype=D_TYPE, **W_init_args)
@@ -909,8 +909,8 @@ def __init__(self,

             bias_initializer = bias_initializer()

-        print(" [TL] SeparableConv2dLayer %s: filters:%s kernel_size:%s strides:%s padding:%s dilation_rate:%s depth_multiplier:%s act:%s" %
-              (self.name, str(filters), str(kernel_size), str(strides), padding, str(dilation_rate), str(depth_multiplier), act.__name__))
+        logging.info("SeparableConv2dLayer %s: filters:%s kernel_size:%s strides:%s padding:%s dilation_rate:%s depth_multiplier:%s act:%s" %
+                     (self.name, str(filters), str(kernel_size), str(strides), padding, str(dilation_rate), str(depth_multiplier), act.__name__))

         with tf.variable_scope(name) as vs:
             self.outputs = tf.layers.separable_conv2d(
@@ -1116,7 +1116,7 @@ def conv2d(
        pre_channel = int(net.outputs.get_shape()[-1])
    except:  # if pre_channel is ?, it happens when using Spatial Transformer Net
        pre_channel = 1
-        print("[warnings] unknow input channels, set to 1")
+        logging.info("[warnings] unknow input channels, set to 1")
    net = Conv2dLayer(
        net,
        act=act,
@@ -1169,7 +1169,7 @@ def deconv2d(net,
        act = tf.identity

    if tf.__version__ > '1.3':
-        print(" [TL] DeConv2d %s: n_filters:%s strides:%s pad:%s act:%s" % (name, str(n_filter), str(strides), padding, act.__name__))
+        logging.info("DeConv2d %s: n_filters:%s strides:%s pad:%s act:%s" % (name, str(n_filter), str(strides), padding, act.__name__))
        inputs = net.outputs
        scope_name = tf.get_variable_scope().name
        if scope_name:
@@ -1256,7 +1256,7 @@ def __init__(self,
         if act is None:
             act = tf.identity

-        print(" [TL] DeConv3d %s: n_filters:%s strides:%s pad:%s act:%s" % (name, str(n_filter), str(strides), padding, act.__name__))
+        logging.info("DeConv3d %s: n_filters:%s strides:%s pad:%s act:%s" % (name, str(n_filter), str(strides), padding, act.__name__))

         with tf.variable_scope(name) as vs:
             self.outputs = tf.contrib.layers.conv3d_transpose(
@@ -1341,7 +1341,7 @@ def __init__(
         if act is None:
             act = tf.identity

-        print(" [TL] DepthwiseConv2d %s: shape:%s strides:%s pad:%s act:%s" % (self.name, str(shape), str(strides), padding, act.__name__))
+        logging.info("DepthwiseConv2d %s: shape:%s strides:%s pad:%s act:%s" % (self.name, str(shape), str(strides), padding, act.__name__))

         if act is None:
             act = tf.identity
@@ -1350,7 +1350,7 @@ def __init__(
             pre_channel = int(layer.outputs.get_shape()[-1])
         except:  # if pre_channel is ?, it happens when using Spatial Transformer Net
             pre_channel = 1
-            print("[warnings] unknow input channels, set to 1")
+            logging.info("[warnings] unknow input channels, set to 1")

         shape = [shape[0], shape[1], pre_channel, channel_multiplier]

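None of the hunks above add an import for logging inside convolution.py; presumably the name resolves to the new tensorlayer/_logging.py module (for example via something like from .. import _logging as logging, which is an assumption, not shown in this diff). One practical effect of routing these messages through logging rather than bare print calls is that users can silence or redirect the per-layer construction output with the standard logging API. A hedged sketch:

    import logging

    # Assumes the TL messages go through the root logger configured by
    # tensorlayer/_logging.py (logging.basicConfig), as in this commit.
    logging.getLogger().setLevel(logging.WARNING)  # hide the INFO-level layer messages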