diff --git a/tensorlayer/files.py b/tensorlayer/files.py index 30b1cac51..b9137c576 100644 --- a/tensorlayer/files.py +++ b/tensorlayer/files.py @@ -1,4 +1,3 @@ -#! /usr/bin/python # -*- coding: utf-8 -*- import gzip @@ -15,6 +14,7 @@ from tensorflow.python.platform import gfile from . import nlp, utils, visualize +from . import _logging as logging ## Load dataset functions @@ -42,7 +42,7 @@ def load_mnist_dataset(shape=(-1, 784), path="data"): def load_mnist_images(path, filename): filepath = maybe_download_and_extract(filename, path, 'http://yann.lecun.com/exdb/mnist/') - print(filepath) + logging.info(filepath) # Read the inputs in Yann LeCun's binary format. with gzip.open(filepath, 'rb') as f: data = np.frombuffer(f.read(), np.uint8, offset=16) @@ -63,7 +63,7 @@ def load_mnist_labels(path, filename): return data # Download and read the training and test set images and labels. - print("Load or Download MNIST > {}".format(path)) + logging.info("Load or Download MNIST > {}".format(path)) X_train = load_mnist_images(path, 'train-images-idx3-ubyte.gz') y_train = load_mnist_labels(path, 'train-labels-idx1-ubyte.gz') X_test = load_mnist_images(path, 't10k-images-idx3-ubyte.gz') @@ -116,7 +116,7 @@ def load_cifar10_dataset(shape=(-1, 32, 32, 3), path='data', plotable=False, sec - ``_ """ path = os.path.join(path, 'cifar10') - print("Load or Download cifar10 > {}".format(path)) + logging.info("Load or Download cifar10 > {}".format(path)) #Helper function to unpickle the data def unpickle(file): @@ -163,11 +163,11 @@ def unpickle(file): y_train = np.array(y_train) if plotable == True: - print('\nCIFAR-10') + logging.info('\nCIFAR-10') import matplotlib.pyplot as plt fig = plt.figure(1) - print('Shape of a training image: X_train[0]', X_train[0].shape) + logging.info('Shape of a training image: X_train[0] %s' % X_train[0].shape) plt.ion() # interactive mode count = 1 @@ -189,10 +189,10 @@ def unpickle(file): plt.draw() # interactive mode plt.pause(3) # interactive mode - print("X_train:", X_train.shape) - print("y_train:", y_train.shape) - print("X_test:", X_test.shape) - print("y_test:", y_test.shape) + logging.info("X_train: %s" % X_train.shape) + logging.info("y_train: %s" % y_train.shape) + logging.info("X_test: %s" % X_test.shape) + logging.info("y_test: %s" % y_test.shape) X_train = np.asarray(X_train, dtype=np.float32) X_test = np.asarray(X_test, dtype=np.float32) @@ -228,7 +228,7 @@ def load_ptb_dataset(path='data'): - `Manual download `_ """ path = os.path.join(path, 'ptb') - print("Load or Download Penn TreeBank (PTB) dataset > {}".format(path)) + logging.info("Load or Download Penn TreeBank (PTB) dataset > {}".format(path)) #Maybe dowload and uncompress tar, or load exsisting files filename = 'simple-examples.tgz' @@ -247,10 +247,10 @@ def load_ptb_dataset(path='data'): test_data = nlp.words_to_word_ids(nlp.read_words(test_path), word_to_id) vocabulary = len(word_to_id) - # print(nlp.read_words(train_path)) # ... 'according', 'to', 'mr.', '', ''] - # print(train_data) # ... 214, 5, 23, 1, 2] - # print(word_to_id) # ... 'beyond': 1295, 'anti-nuclear': 9599, 'trouble': 1520, '': 2 ... } - # print(vocabulary) # 10000 + # logging.info(nlp.read_words(train_path)) # ... 'according', 'to', 'mr.', '', ''] + # logging.info(train_data) # ... 214, 5, 23, 1, 2] + # logging.info(word_to_id) # ... 'beyond': 1295, 'anti-nuclear': 9599, 'trouble': 1520, '': 2 ... 
} + # logging.info(vocabulary) # 10000 # exit() return train_data, valid_data, test_data, vocabulary @@ -278,7 +278,7 @@ def load_matt_mahoney_text8_dataset(path='data'): >>> print('Data size', len(words)) """ path = os.path.join(path, 'mm_test8') - print("Load or Download matt_mahoney_text8 Dataset> {}".format(path)) + logging.info("Load or Download matt_mahoney_text8 Dataset> {}".format(path)) filename = 'text8.zip' url = 'http://mattmahoney.net/dc/' @@ -389,7 +389,7 @@ def load_nietzsche_dataset(path='data'): >>> words = basic_clean_str(words) >>> words = words.split() """ - print("Load or Download nietzsche dataset > {}".format(path)) + logging.info("Load or Download nietzsche dataset > {}".format(path)) path = os.path.join(path, 'nietzsche') filename = "nietzsche.txt" @@ -427,7 +427,7 @@ def load_wmt_en_fr_dataset(path='data'): def gunzip_file(gz_path, new_path): """Unzips from gz_path into new_path.""" - print("Unpacking %s to %s" % (gz_path, new_path)) + logging.info("Unpacking %s to %s" % (gz_path, new_path)) with gzip.open(gz_path, "rb") as gz_file: with open(new_path, "wb") as new_file: for line in gz_file: @@ -449,7 +449,7 @@ def get_wmt_enfr_dev_set(path): dev_name = "newstest2013" dev_path = os.path.join(path, "newstest2013") if not (gfile.Exists(dev_path + ".fr") and gfile.Exists(dev_path + ".en")): - print("Extracting tgz file %s" % dev_file) + logging.info("Extracting tgz file %s" % dev_file) with tarfile.open(dev_file, "r:gz") as dev_tar: fr_dev_file = dev_tar.getmember("dev/" + dev_name + ".fr") en_dev_file = dev_tar.getmember("dev/" + dev_name + ".en") @@ -459,7 +459,7 @@ def get_wmt_enfr_dev_set(path): dev_tar.extract(en_dev_file, path) return dev_path - print("Load or Download WMT English-to-French translation > {}".format(path)) + logging.info("Load or Download WMT English-to-French translation > {}".format(path)) train_path = get_wmt_enfr_train_set(path) dev_path = get_wmt_enfr_dev_set(path) @@ -496,7 +496,7 @@ def load_flickr25k_dataset(tag='sky', path="data", n_threads=50, printable=False url = 'http://press.liacs.nl/mirflickr/mirflickr25k/' ## download dataset if folder_exists(path + "/mirflickr") is False: - print("[*] Flickr25k is nonexistent in {}".format(path)) + logging.info("[*] Flickr25k is nonexistent in {}".format(path)) maybe_download_and_extract(filename, path, url, extract=True) del_file(path + '/' + filename) ## return images by the given tag. @@ -504,21 +504,21 @@ def load_flickr25k_dataset(tag='sky', path="data", n_threads=50, printable=False folder_imgs = path + "/mirflickr" path_imgs = load_file_list(path=folder_imgs, regx='\\.jpg', printable=False) path_imgs.sort(key=natural_keys) - # print(path_imgs[0:10]) + # logging.info(path_imgs[0:10]) # 2. tag path list folder_tags = path + "/mirflickr/meta/tags" path_tags = load_file_list(path=folder_tags, regx='\\.txt', printable=False) path_tags.sort(key=natural_keys) - # print(path_tags[0:10]) + # logging.info(path_tags[0:10]) # 3. 
select images if tag is None: - print("[Flickr25k] reading all images") + logging.info("[Flickr25k] reading all images") else: - print("[Flickr25k] reading images with tag: {}".format(tag)) + logging.info("[Flickr25k] reading images with tag: {}".format(tag)) images_list = [] for idx in range(0, len(path_tags)): tags = read_file(folder_tags + '/' + path_tags[idx]).split('\n') - # print(idx+1, tags) + # logging.info(idx+1, tags) if tag is None or tag in tags: images_list.append(path_imgs[idx]) @@ -552,7 +552,7 @@ def load_flickr1M_dataset(tag='sky', size=10, path="data", n_threads=50, printab >>> images = tl.files.load_flickr1M_dataset(tag='zebra') """ path = os.path.join(path, 'flickr1M') - print("[Flickr1M] using {}% of images = {}".format(size * 10, size * 100000)) + logging.info("[Flickr1M] using {}% of images = {}".format(size * 10, size * 100000)) images_zip = [ 'images0.zip', 'images1.zip', 'images2.zip', 'images3.zip', 'images4.zip', 'images5.zip', 'images6.zip', 'images7.zip', 'images8.zip', 'images9.zip' ] @@ -561,22 +561,22 @@ def load_flickr1M_dataset(tag='sky', size=10, path="data", n_threads=50, printab ## download dataset for image_zip in images_zip[0:size]: image_folder = image_zip.split(".")[0] - # print(path+"/"+image_folder) + # logging.info(path+"/"+image_folder) if folder_exists(path + "/" + image_folder) is False: - # print(image_zip) - print("[Flickr1M] {} is missing in {}".format(image_folder, path)) + # logging.info(image_zip) + logging.info("[Flickr1M] {} is missing in {}".format(image_folder, path)) maybe_download_and_extract(image_zip, path, url, extract=True) del_file(path + '/' + image_zip) os.system("mv {} {}".format(path + '/images', path + '/' + image_folder)) else: - print("[Flickr1M] {} exists in {}".format(image_folder, path)) + logging.info("[Flickr1M] {} exists in {}".format(image_folder, path)) ## download tag if folder_exists(path + "/tags") is False: - print("[Flickr1M] tag files is nonexistent in {}".format(path)) + logging.info("[Flickr1M] tag files is nonexistent in {}".format(path)) maybe_download_and_extract(tag_zip, path, url, extract=True) del_file(path + '/' + tag_zip) else: - print("[Flickr1M] tags exists in {}".format(path)) + logging.info("[Flickr1M] tags exists in {}".format(path)) ## 1. image path list images_list = [] @@ -584,36 +584,36 @@ def load_flickr1M_dataset(tag='sky', size=10, path="data", n_threads=50, printab for i in range(0, size): images_folder_list += load_folder_list(path=path + '/images%d' % i) images_folder_list.sort(key=lambda s: int(s.split('/')[-1])) # folder/images/ddd - # print(images_folder_list) + # logging.info(images_folder_list) # exit() for folder in images_folder_list[0:size * 10]: tmp = load_file_list(path=folder, regx='\\.jpg', printable=False) tmp.sort(key=lambda s: int(s.split('.')[-2])) # ddd.jpg - # print(tmp[0::570]) + # logging.info(tmp[0::570]) images_list.extend([folder + '/' + x for x in tmp]) - # print('IM', len(images_list), images_list[0::6000]) + # logging.info('IM', len(images_list), images_list[0::6000]) ## 2. 
tag path list tag_list = [] tag_folder_list = load_folder_list(path + "/tags") tag_folder_list.sort(key=lambda s: int(s.split('/')[-1])) # folder/images/ddd for folder in tag_folder_list[0:size * 10]: - # print(folder) + # logging.info(folder) tmp = load_file_list(path=folder, regx='\\.txt', printable=False) tmp.sort(key=lambda s: int(s.split('.')[-2])) # ddd.txt tmp = [folder + '/' + s for s in tmp] tag_list += tmp - # print('T', len(tag_list), tag_list[0::6000]) + # logging.info('T', len(tag_list), tag_list[0::6000]) # exit() ## 3. select images - print("[Flickr1M] searching tag: {}".format(tag)) + logging.info("[Flickr1M] searching tag: {}".format(tag)) select_images_list = [] for idx in range(0, len(tag_list)): tags = read_file(tag_list[idx]).split('\n') if tag in tags: select_images_list.append(images_list[idx]) - # print(idx, tags, tag_list[idx], images_list[idx]) - print("[Flickr1M] reading images with tag: {}".format(tag)) + # logging.info(idx, tags, tag_list[idx], images_list[idx]) + logging.info("[Flickr1M] reading images with tag: {}".format(tag)) images = visualize.read_images(select_images_list, '', n_threads=n_threads, printable=printable) return images @@ -636,7 +636,7 @@ def load_cyclegan_dataset(filename='summer2winter_yosemite', path='data'): url = 'https://people.eecs.berkeley.edu/~taesung_park/CycleGAN/datasets/' if folder_exists(os.path.join(path, filename)) is False: - print("[*] {} is nonexistent in {}".format(filename, path)) + logging.info("[*] {} is nonexistent in {}".format(filename, path)) maybe_download_and_extract(filename + '.zip', path, url, extract=True) del_file(os.path.join(path, filename + '.zip')) @@ -708,7 +708,7 @@ def load_celebA_dataset(dirpath='data'): save_path = os.path.join(dirpath, filename) image_path = os.path.join(dirpath, data_dir) if os.path.exists(image_path): - print('[*] {} already exists'.format(save_path)) + logging.info('[*] {} already exists'.format(save_path)) else: exists_or_mkdir(dirpath) download_file_from_google_drive(drive_id, save_path) @@ -827,19 +827,19 @@ def _recursive_parse_xml_to_dict(xml): url = "http://host.robots.ox.ac.uk/pascal/VOC/voc2012/" tar_filename = "VOCtrainval_11-May-2012.tar" extracted_filename = "VOC2012" #"VOCdevkit/VOC2012" - print(" [============= VOC 2012 =============]") + logging.info(" [============= VOC 2012 =============]") elif dataset == "2012test": extracted_filename = "VOC2012test" #"VOCdevkit/VOC2012" - print(" [============= VOC 2012 Test Set =============]") - print(" \nAuthor: 2012test only have person annotation, so 2007test is highly recommended for testing !\n") + logging.info(" [============= VOC 2012 Test Set =============]") + logging.info(" \nAuthor: 2012test only have person annotation, so 2007test is highly recommended for testing !\n") import time time.sleep(3) if os.path.isdir(os.path.join(path, extracted_filename)) is False: - print("For VOC 2012 Test data - online registration required") - print( + logging.info("For VOC 2012 Test data - online registration required") + logging.info( " Please download VOC2012test.tar from: \n register: http://host.robots.ox.ac.uk:8080 \n voc2012 : http://host.robots.ox.ac.uk:8080/eval/challenges/voc2012/ \ndownload: http://host.robots.ox.ac.uk:8080/eval/downloads/VOC2012test.tar" ) - print(" unzip VOC2012test.tar,rename the folder to VOC2012test and put it into %s" % path) + logging.info(" unzip VOC2012test.tar,rename the folder to VOC2012test and put it into %s" % path) exit() # # 
http://host.robots.ox.ac.uk:8080/eval/downloads/VOC2012test.tar # url = "http://host.robots.ox.ac.uk:8080/eval/downloads/" @@ -848,14 +848,14 @@ def _recursive_parse_xml_to_dict(xml): url = "http://host.robots.ox.ac.uk/pascal/VOC/voc2007/" tar_filename = "VOCtrainval_06-Nov-2007.tar" extracted_filename = "VOC2007" - print(" [============= VOC 2007 =============]") + logging.info(" [============= VOC 2007 =============]") elif dataset == "2007test": # http://host.robots.ox.ac.uk/pascal/VOC/voc2007/index.html#testdata # http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtest_06-Nov-2007.tar url = "http://host.robots.ox.ac.uk/pascal/VOC/voc2007/" tar_filename = "VOCtest_06-Nov-2007.tar" extracted_filename = "VOC2007test" - print(" [============= VOC 2007 Test Set =============]") + logging.info(" [============= VOC 2007 Test Set =============]") else: raise Exception("Please set the dataset aug to 2012, 2012test or 2007.") @@ -863,7 +863,7 @@ def _recursive_parse_xml_to_dict(xml): if dataset != "2012test": from sys import platform as _platform if folder_exists(os.path.join(path, extracted_filename)) is False: - print("[VOC] {} is nonexistent in {}".format(extracted_filename, path)) + logging.info("[VOC] {} is nonexistent in {}".format(extracted_filename, path)) maybe_download_and_extract(tar_filename, path, url, extract=True) del_file(os.path.join(path, tar_filename)) if dataset == "2012": @@ -895,33 +895,33 @@ def _recursive_parse_xml_to_dict(xml): classes += classes_in_person # use extra 3 classes for person classes_dict = utils.list_string_to_dict(classes) - print("[VOC] object classes {}".format(classes_dict)) + logging.info("[VOC] object classes {}".format(classes_dict)) ##======== 1. image path list # folder_imgs = path+"/"+extracted_filename+"/JPEGImages/" folder_imgs = os.path.join(path, extracted_filename, "JPEGImages") imgs_file_list = load_file_list(path=folder_imgs, regx='\\.jpg', printable=False) - print("[VOC] {} images found".format(len(imgs_file_list))) + logging.info("[VOC] {} images found".format(len(imgs_file_list))) imgs_file_list.sort(key=lambda s: int(s.replace('.', ' ').replace('_', '').split(' ')[-2])) # 2007_000027.jpg --> 2007000027 imgs_file_list = [os.path.join(folder_imgs, s) for s in imgs_file_list] - # print('IM',imgs_file_list[0::3333], imgs_file_list[-1]) + # logging.info('IM',imgs_file_list[0::3333], imgs_file_list[-1]) if dataset != "2012test": ##======== 2. semantic segmentation maps path list # folder_semseg = path+"/"+extracted_filename+"/SegmentationClass/" folder_semseg = os.path.join(path, extracted_filename, "SegmentationClass") imgs_semseg_file_list = load_file_list(path=folder_semseg, regx='\\.png', printable=False) - print("[VOC] {} maps for semantic segmentation found".format(len(imgs_semseg_file_list))) + logging.info("[VOC] {} maps for semantic segmentation found".format(len(imgs_semseg_file_list))) imgs_semseg_file_list.sort(key=lambda s: int(s.replace('.', ' ').replace('_', '').split(' ')[-2])) # 2007_000032.png --> 2007000032 imgs_semseg_file_list = [os.path.join(folder_semseg, s) for s in imgs_semseg_file_list] - # print('Semantic Seg IM',imgs_semseg_file_list[0::333], imgs_semseg_file_list[-1]) + # logging.info('Semantic Seg IM',imgs_semseg_file_list[0::333], imgs_semseg_file_list[-1]) ##======== 3. 
instance segmentation maps path list # folder_insseg = path+"/"+extracted_filename+"/SegmentationObject/" folder_insseg = os.path.join(path, extracted_filename, "SegmentationObject") imgs_insseg_file_list = load_file_list(path=folder_insseg, regx='\\.png', printable=False) - print("[VOC] {} maps for instance segmentation found".format(len(imgs_semseg_file_list))) + logging.info("[VOC] {} maps for instance segmentation found".format(len(imgs_semseg_file_list))) imgs_insseg_file_list.sort(key=lambda s: int(s.replace('.', ' ').replace('_', '').split(' ')[-2])) # 2007_000032.png --> 2007000032 imgs_insseg_file_list = [os.path.join(folder_insseg, s) for s in imgs_insseg_file_list] - # print('Instance Seg IM',imgs_insseg_file_list[0::333], imgs_insseg_file_list[-1]) + # logging.info('Instance Seg IM',imgs_insseg_file_list[0::333], imgs_insseg_file_list[-1]) else: imgs_semseg_file_list = [] imgs_insseg_file_list = [] @@ -929,10 +929,10 @@ def _recursive_parse_xml_to_dict(xml): # folder_ann = path+"/"+extracted_filename+"/Annotations/" folder_ann = os.path.join(path, extracted_filename, "Annotations") imgs_ann_file_list = load_file_list(path=folder_ann, regx='\\.xml', printable=False) - print("[VOC] {} XML annotation files for bounding box and object class found".format(len(imgs_ann_file_list))) + logging.info("[VOC] {} XML annotation files for bounding box and object class found".format(len(imgs_ann_file_list))) imgs_ann_file_list.sort(key=lambda s: int(s.replace('.', ' ').replace('_', '').split(' ')[-2])) # 2007_000027.xml --> 2007000027 imgs_ann_file_list = [os.path.join(folder_ann, s) for s in imgs_ann_file_list] - # print('ANN',imgs_ann_file_list[0::3333], imgs_ann_file_list[-1]) + # logging.info('ANN',imgs_ann_file_list[0::3333], imgs_ann_file_list[-1]) if dataset == "2012test": # remove unused images in JPEG folder imgs_file_list_new = [] @@ -943,7 +943,7 @@ def _recursive_parse_xml_to_dict(xml): imgs_file_list_new.append(im) break imgs_file_list = imgs_file_list_new - print("[VOC] keep %d images" % len(imgs_file_list_new)) + logging.info("[VOC] keep %d images" % len(imgs_file_list_new)) ##======== parse XML annotations def convert(size, box): @@ -970,7 +970,7 @@ def convert_annotation(file_name): h = int(size.find('height').text) n_objs = 0 - # print(file_name, w, h, size) + # logging.info(file_name, w, h, size) # exit() for obj in root.iter('object'): if dataset != "2012test": @@ -1004,12 +1004,12 @@ def convert_annotation(file_name): in_file.close() return n_objs, out_file - print("[VOC] Parsing xml annotations files") + logging.info("[VOC] Parsing xml annotations files") n_objs_list = [] objs_info_list = [] # Darknet Format list of string objs_info_dicts = {} for idx, ann_file in enumerate(imgs_ann_file_list): - # print(ann_file) + # logging.info(ann_file) n_objs, objs_info = convert_annotation(ann_file) n_objs_list.append(n_objs) objs_info_list.append(objs_info) @@ -1063,18 +1063,18 @@ def save_npz(save_list=[], name='model.npz', sess=None): for k, value in enumerate(save_list): save_list_var.append(value.eval()) except: - print(" Fail to save model, Hint: pass the session into this function, save_npz(network.all_params, name='model.npz', sess=sess)") + logging.info(" Fail to save model, Hint: pass the session into this function, save_npz(network.all_params, name='model.npz', sess=sess)") np.savez(name, params=save_list_var) save_list_var = None del save_list_var - print("[*] %s saved" % name) + logging.info("[*] %s saved" % name) ## save params into a dictionary # rename_dict = {} # 
for k, value in enumerate(save_dict): # rename_dict.update({'param'+str(k) : value.eval()}) # np.savez(name, **rename_dict) - # print('Model is saved to: %s' % name) + # logging.info('Model is saved to: %s' % name) def load_npz(path='', name='model.npz'): @@ -1103,10 +1103,10 @@ def load_npz(path='', name='model.npz'): ## if save_npz save params into a dictionary # d = np.load( path+name ) # params = [] - # print('Load Model') + # logging.info('Load Model') # for key, val in sorted( d.items() ): # params.append(val) - # print('Loading %s, %s' % (key, str(val.shape))) + # logging.info('Loading %s, %s' % (key, str(val.shape))) # return params ## if save_npz save params into a list d = np.load(path + name) @@ -1114,7 +1114,7 @@ def load_npz(path='', name='model.npz'): # params = val # return params return d['params'] - # print(d.items()[0][1]['params']) + # logging.info(d.items()[0][1]['params']) # exit() # return d.items()[0][1]['params'] @@ -1180,12 +1180,12 @@ def load_and_assign_npz(sess=None, name=None, network=None): assert network is not None assert sess is not None if not os.path.exists(name): - print("[!] Load {} failed!".format(name)) + logging.info("[!] Load {} failed!".format(name)) return False else: params = load_npz(name=name) assign_params(sess, params, network) - print("[*] Load {} SUCCESS!".format(name)) + logging.info("[*] Load {} SUCCESS!".format(name)) return network @@ -1211,7 +1211,7 @@ def save_npz_dict(save_list=[], name='model.npz', sess=None): save_var_dict = None del save_list_var del save_var_dict - print("[*] Model saved in npz_dict %s" % name) + logging.info("[*] Model saved in npz_dict %s" % name) def load_and_assign_npz_dict(name='model.npz', sess=None): @@ -1225,7 +1225,7 @@ def load_and_assign_npz_dict(name='model.npz', sess=None): """ assert sess is not None if not os.path.exists(name): - print("[!] Load {} failed!".format(name)) + logging.info("[!] Load {} failed!".format(name)) return False params = np.load(name) @@ -1243,12 +1243,12 @@ def load_and_assign_npz_dict(name='model.npz', sess=None): raise KeyError else: ops.append(varlist[0].assign(params[key])) - print("[*] params restored: %s" % key) + logging.info("[*] params restored: %s" % key) except KeyError: - print("[!] Warning: Tensor named %s not found in network." % key) + logging.info("[!] Warning: Tensor named %s not found in network." % key) sess.run(ops) - print("[*] Model restored from npz_dict %s" % name) + logging.info("[*] Model restored from npz_dict %s" % name) # def save_npz_dict(save_list=[], name='model.npz', sess=None): @@ -1276,14 +1276,14 @@ def load_and_assign_npz_dict(name='model.npz', sess=None): # for k, value in enumerate(save_list): # save_list_var.append(value.eval()) # except: -# print(" Fail to save model, Hint: pass the session into this function, save_npz_dict(network.all_params, name='model.npz', sess=sess)") +# logging.info(" Fail to save model, Hint: pass the session into this function, save_npz_dict(network.all_params, name='model.npz', sess=sess)") # save_var_dict = {str(idx):val for idx, val in enumerate(save_list_var)} # np.savez(name, **save_var_dict) # save_list_var = None # save_var_dict = None # del save_list_var # del save_var_dict -# print("[*] %s saved" % name) +# logging.info("[*] %s saved" % name) # # def load_npz_dict(path='', name='model.npz'): # """Load the parameters of a Model saved by tl.files.save_npz_dict(). 
@@ -1327,11 +1327,11 @@ def save_ckpt(sess=None, mode_name='model.ckpt', save_dir='checkpoint', var_list if var_list == []: var_list = tf.global_variables() - print("[*] save %s n_params: %d" % (ckpt_file, len(var_list))) + logging.info("[*] save %s n_params: %d" % (ckpt_file, len(var_list))) if printable: for idx, v in enumerate(var_list): - print(" param {:3}: {:15} {}".format(idx, v.name, str(v.get_shape()))) + logging.info(" param {:3}: {:15} {}".format(idx, v.name, str(v.get_shape()))) saver = tf.train.Saver(var_list) saver.save(sess, ckpt_file, global_step=global_step) @@ -1371,18 +1371,18 @@ def load_ckpt(sess=None, mode_name='model.ckpt', save_dir='checkpoint', var_list if var_list == []: var_list = tf.global_variables() - print("[*] load %s n_params: %d" % (ckpt_file, len(var_list))) + logging.info("[*] load %s n_params: %d" % (ckpt_file, len(var_list))) if printable: for idx, v in enumerate(var_list): - print(" param {:3}: {:15} {}".format(idx, v.name, str(v.get_shape()))) + logging.info(" param {:3}: {:15} {}".format(idx, v.name, str(v.get_shape()))) try: saver = tf.train.Saver(var_list) saver.restore(sess, ckpt_file) except Exception as e: - print(e) - print("[*] load ckpt fail ...") + logging.info(e) + logging.info("[*] load ckpt fail ...") ## Load and save variables @@ -1415,7 +1415,7 @@ def load_npy_to_any(path='', name='file.npy'): try: return npy except: - print("[!] Fail to load %s" % file_path) + logging.info("[!] Fail to load %s" % file_path) exit() @@ -1475,8 +1475,8 @@ def load_file_list(path=None, regx='\.npz', printable=True): return_list.append(f) # return_list.sort() if printable: - print('Match file list = %s' % return_list) - print('Number of files = %d' % len(return_list)) + logging.info('Match file list = %s' % return_list) + logging.info('Number of files = %d' % len(return_list)) return return_list @@ -1512,12 +1512,12 @@ def exists_or_mkdir(path, verbose=True): """ if not os.path.exists(path): if verbose: - print("[*] creates %s ..." % path) + logging.info("[*] creates %s ..." % path) os.makedirs(path) return False else: if verbose: - print("[!] %s exists ..." % path) + logging.info("[!] %s exists ..." % path) return True @@ -1568,29 +1568,29 @@ def _dlProgress(count, blockSize, totalSize): from urllib.request import urlretrieve filepath = os.path.join(working_directory, filename) urlretrieve(url_source + filename, filepath, reporthook=_dlProgress) + sys.stdout.write('\n') exists_or_mkdir(working_directory, verbose=False) filepath = os.path.join(working_directory, filename) if not os.path.exists(filepath): _download(filename, working_directory, url_source) - print() statinfo = os.stat(filepath) - print('Succesfully downloaded %s %s bytes.' % (filename, statinfo.st_size)) #, 'bytes.') + logging.info('Succesfully downloaded %s %s bytes.' % (filename, statinfo.st_size)) #, 'bytes.') if (not (expected_bytes is None) and (expected_bytes != statinfo.st_size)): raise Exception('Failed to verify ' + filename + '. Can you get to it with a browser?') if (extract): if tarfile.is_tarfile(filepath): - print('Trying to extract tar file') + logging.info('Trying to extract tar file') tarfile.open(filepath, 'r').extractall(working_directory) - print('... Success!') + logging.info('... Success!') elif zipfile.is_zipfile(filepath): - print('Trying to extract zip file') + logging.info('Trying to extract zip file') with zipfile.ZipFile(filepath) as zf: zf.extractall(working_directory) - print('... Success!') + logging.info('... 
Success!') else: - print("Unknown compression_format only .tar.gz/.tar.bz2/.tar and .zip supported") + logging.info("Unknown compression_format only .tar.gz/.tar.bz2/.tar and .zip supported") return filepath @@ -1638,5 +1638,5 @@ def npz_to_W_pdf(path=None, regx='w1pre_[0-9]+\.(npz)'): file_list = load_file_list(path=path, regx=regx) for f in file_list: W = load_npz(path, f)[0] - print("%s --> %s" % (f, f.split('.')[0] + '.pdf')) + logging.info("%s --> %s" % (f, f.split('.')[0] + '.pdf')) visualize.W(W, second=10, saveable=True, name=f.split('.')[0], fig_idx=2012) diff --git a/tensorlayer/nlp.py b/tensorlayer/nlp.py index be1d438fc..3083d55cd 100755 --- a/tensorlayer/nlp.py +++ b/tensorlayer/nlp.py @@ -1,4 +1,3 @@ -#! /usr/bin/python # -*- coding: utf-8 -*- import collections @@ -15,6 +14,7 @@ import tensorflow as tf from six.moves import urllib, xrange from tensorflow.python.platform import gfile +from . import _logging as logging # Iteration functions @@ -127,17 +127,17 @@ def sample(a=[], temperature=1.0): return np.argmax(np.random.multinomial(1, a, 1)) except: # np.set_printoptions(threshold=np.nan) - # print(a) - # print(np.sum(a)) - # print(np.max(a)) - # print(np.min(a)) + # logging.info(a) + # logging.info(np.sum(a)) + # logging.info(np.max(a)) + # logging.info(np.min(a)) # exit() message = "For large vocabulary_size, choice a higher temperature\ to avoid log error. Hint : use ``sample_top``. " warnings.warn(message, Warning) - # print(a) - # print(b) + # logging.info(a) + # logging.info(b) return np.argmax(np.random.multinomial(1, b, 1)) @@ -153,7 +153,7 @@ def sample_top(a=[], top_k=10): """ idx = np.argpartition(a, -top_k)[-top_k:] probs = a[idx] - # print("new", probs) + # logging.info("new %f" % probs) probs = probs / np.sum(probs) choice = np.random.choice(idx, p=probs) return choice @@ -163,7 +163,7 @@ def sample_top(a=[], top_k=10): # idx = idx[:top_k] # # a = a[idx] # probs = a[idx] - # print("prev", probs) + # logging.info("prev %f" % probs) # # probs = probs / np.sum(probs) # # choice = np.random.choice(idx, p=probs) # # return choice @@ -234,8 +234,8 @@ class Vocabulary(object): def __init__(self, vocab_file, start_word="", end_word="", unk_word="", pad_word=""): if not tf.gfile.Exists(vocab_file): - tf.logging.fatal("Vocab file %s not found.", vocab_file) - tf.logging.info("Initializing vocabulary from file: %s", vocab_file) + tf.logging.fatal("Vocab file %s not found." % vocab_file) + tf.logging.info("Initializing vocabulary from file: %s" % vocab_file) with tf.gfile.GFile(vocab_file, mode="r") as f: reverse_vocab = list(f.readlines()) @@ -253,8 +253,8 @@ def __init__(self, vocab_file, start_word="", end_word="", unk_word="', 'one', 'two', ',', 'three', ''], ['', 'four', 'five', 'five', '']] >>> tl.nlp.create_vocab(processed_capts, word_counts_output_file='vocab.txt', min_word_count=1) - ... [TL] Creating vocabulary. + ... Creating vocabulary. ... Total words: 8 ... Words in vocabulary: 8 ... Wrote vocabulary file: vocab.txt @@ -373,24 +373,24 @@ def create_vocab(sentences, word_counts_output_file, min_word_count=1): ... pad_id: 0 """ from collections import Counter - print(" [TL] Creating vocabulary.") + logging.info("Creating vocabulary.") counter = Counter() for c in sentences: counter.update(c) - # print('c',c) - print(" Total words: %d" % len(counter)) + # logging.info('c',c) + logging.info(" Total words: %d" % len(counter)) # Filter uncommon words and sort by descending count. 
word_counts = [x for x in counter.items() if x[1] >= min_word_count] word_counts.sort(key=lambda x: x[1], reverse=True) word_counts = [("", 0)] + word_counts # 1st id should be reserved for padding - # print(word_counts) - print(" Words in vocabulary: %d" % len(word_counts)) + # logging.info(word_counts) + logging.info(" Words in vocabulary: %d" % len(word_counts)) # Write out the word counts file. with tf.gfile.FastGFile(word_counts_output_file, "w") as f: f.write("\n".join(["%s %d" % (w, c) for w, c in word_counts])) - print(" Wrote vocabulary file: %s" % word_counts_output_file) + logging.info(" Wrote vocabulary file: %s" % word_counts_output_file) # Create the vocabulary dictionary. reverse_vocab = [x[0] for x in word_counts] @@ -506,9 +506,9 @@ def read_analogies_file(eval_file='questions-words.txt', word2id={}): questions_skipped += 1 else: questions.append(np.array(ids)) - print("Eval analogy file: ", eval_file) - print("Questions: ", len(questions)) - print("Skipped: ", questions_skipped) + logging.info("Eval analogy file: %s" % eval_file) + logging.info("Questions: %d", len(questions)) + logging.info("Skipped: %d", questions_skipped) analogy_questions = np.array(questions, dtype=np.int32) return analogy_questions @@ -541,13 +541,13 @@ def build_vocab(data): """ # data = _read_words(filename) counter = collections.Counter(data) - # print('counter', counter) # dictionary for the occurrence number of each word, e.g. 'banknote': 1, 'photography': 1, 'kia': 1 + # logging.info('counter %s' % counter) # dictionary for the occurrence number of each word, e.g. 'banknote': 1, 'photography': 1, 'kia': 1 count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0])) - # print('count_pairs',count_pairs) # convert dictionary to list of tuple, e.g. ('ssangyong', 1), ('swapo', 1), ('wachter', 1) + # logging.info('count_pairs %s' % count_pairs) # convert dictionary to list of tuple, e.g. ('ssangyong', 1), ('swapo', 1), ('wachter', 1) words, _ = list(zip(*count_pairs)) word_to_id = dict(zip(words, range(len(words)))) - # print(words) # list of words - # print(word_to_id) # dictionary for word to id, e.g. 'campbell': 2587, 'atlantic': 2247, 'aoun': 6746 + # logging.info(words) # list of words + # logging.info(word_to_id) # dictionary for word to id, e.g. 
'campbell': 2587, 'atlantic': 2247, 'aoun': 6746 return word_to_id @@ -627,8 +627,8 @@ def build_words_dataset(words=[], vocabulary_size=50000, printable=True, unk_key count[0][1] = unk_count reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys())) if printable: - print('Real vocabulary size %d' % len(collections.Counter(words).keys())) - print('Limited vocabulary size {}'.format(vocabulary_size)) + logging.info('Real vocabulary size %d' % len(collections.Counter(words).keys())) + logging.info('Limited vocabulary size {}'.format(vocabulary_size)) assert len(collections.Counter(words).keys()) >= vocabulary_size, \ "the limited vocabulary_size must be less than or equal to the read vocabulary_size" return data, count, dictionary, reverse_dictionary @@ -670,10 +670,10 @@ def words_to_word_ids(data=[], word_to_id={}, unk_key='UNK'): - `tensorflow.models.rnn.ptb.reader `_ """ # if isinstance(data[0], six.string_types): - # print(type(data[0])) + # logging.info(type(data[0])) # # exit() - # print(data[0]) - # print(word_to_id) + # logging.info(data[0]) + # logging.info(word_to_id) # return [word_to_id[str(word)] for word in data] # else: @@ -687,11 +687,11 @@ def words_to_word_ids(data=[], word_to_id={}, unk_key='UNK'): # return [word_to_id[word] for word in data] # this one # if isinstance(data[0], str): - # # print('is a string object') + # # logging.info('is a string object') # return [word_to_id[word] for word in data] # else:#if isinstance(s, bytes): - # # print('is a unicode object') - # # print(data[0]) + # # logging.info('is a unicode object') + # # logging.info(data[0]) # return [word_to_id[str(word)] f @@ -749,7 +749,7 @@ def save_vocab(count=[], name='vocab.txt'): with open(os.path.join(pwd, name), "w") as f: for i in xrange(vocabulary_size): f.write("%s %d\n" % (tf.compat.as_text(count[i][0]), count[i][1])) - print("%d vocab saved to %s in %s" % (vocabulary_size, name, pwd)) + logging.info("%d vocab saved to %s in %s" % (vocabulary_size, name, pwd)) # Functions for translation @@ -772,7 +772,7 @@ def basic_tokenizer(sentence, _WORD_SPLIT=re.compile(b"([.,!?\"':;)(])")): >>> with gfile.GFile(train_path + ".en", mode="rb") as f: >>> for line in f: >>> tokens = tl.nlp.basic_tokenizer(line) - >>> print(tokens) + >>> logging.info(tokens) >>> exit() ... [b'Changing', b'Lives', b'|', b'Changing', b'Society', b'|', b'How', ... 
b'It', b'Works', b'|', b'Technology', b'Drives', b'Change', b'Home', @@ -821,14 +821,14 @@ def create_vocabulary(vocabulary_path, - Code from ``/tensorflow/models/rnn/translation/data_utils.py`` """ if not gfile.Exists(vocabulary_path): - print("Creating vocabulary %s from data %s" % (vocabulary_path, data_path)) + logging.info("Creating vocabulary %s from data %s" % (vocabulary_path, data_path)) vocab = {} with gfile.GFile(data_path, mode="rb") as f: counter = 0 for line in f: counter += 1 if counter % 100000 == 0: - print(" processing line %d" % counter) + logging.info(" processing line %d" % counter) tokens = tokenizer(line) if tokenizer else basic_tokenizer(line) for w in tokens: word = re.sub(_DIGIT_RE, b"0", w) if normalize_digits else w @@ -843,7 +843,7 @@ def create_vocabulary(vocabulary_path, for w in vocab_list: vocab_file.write(w + b"\n") else: - print("Vocabulary %s from data %s exists" % (vocabulary_path, data_path)) + logging.info("Vocabulary %s from data %s exists" % (vocabulary_path, data_path)) def initialize_vocabulary(vocabulary_path): @@ -948,7 +948,7 @@ def data_to_token_ids(data_path, target_path, vocabulary_path, tokenizer=None, n - Code from ``/tensorflow/models/rnn/translation/data_utils.py`` """ if not gfile.Exists(target_path): - print("Tokenizing data in %s" % data_path) + logging.info("Tokenizing data in %s" % data_path) vocab, _ = initialize_vocabulary(vocabulary_path) with gfile.GFile(data_path, mode="rb") as data_file: with gfile.GFile(target_path, mode="w") as tokens_file: @@ -956,11 +956,11 @@ def data_to_token_ids(data_path, target_path, vocabulary_path, tokenizer=None, n for line in data_file: counter += 1 if counter % 100000 == 0: - print(" tokenizing line %d" % counter) + logging.info(" tokenizing line %d" % counter) token_ids = sentence_to_token_ids(line, vocab, tokenizer, normalize_digits, UNK_ID=UNK_ID, _DIGIT_RE=_DIGIT_RE) tokens_file.write(" ".join([str(tok) for tok in token_ids]) + "\n") else: - print("Target path %s exists" % target_path) + logging.info("Target path %s exists" % target_path) def moses_multi_bleu(hypotheses, references, lowercase=False): # tl.nlp diff --git a/tensorlayer/ops.py b/tensorlayer/ops.py index 0cc5a0c95..630071879 100644 --- a/tensorlayer/ops.py +++ b/tensorlayer/ops.py @@ -1,4 +1,3 @@ -#! /usr/bin/python # -*- coding: utf-8 -*- import os @@ -10,6 +9,7 @@ import tensorflow as tf import tensorlayer as tl +from . 
import _logging as logging def exit_tf(sess=None, port=6006): @@ -29,19 +29,19 @@ def exit_tf(sess=None, port=6006): # import time # time.sleep(2) if _platform == "linux" or _platform == "linux2": - print('linux: %s' % text) + logging.info('linux: %s' % text) os.system('nvidia-smi') os.system('fuser ' + port + '/tcp -k') # kill tensorboard 6006 os.system("nvidia-smi | grep python |awk '{print $3}'|xargs kill") # kill all nvidia-smi python process _exit() elif _platform == "darwin": - print('OS X: %s' % text) + logging.info('OS X: %s' % text) subprocess.Popen("lsof -i tcp:" + str(port) + " | grep -v PID | awk '{print $2}' | xargs kill", shell=True) # kill tensorboard elif _platform == "win32": - print(text2 + "Windows") + logging.info(text2 + "Windows") # TODO else: - print(text2 + _platform) + logging.info(text2 + _platform) def open_tb(logdir='/tmp/tensorflow', port=6006): @@ -58,21 +58,21 @@ def open_tb(logdir='/tmp/tensorflow', port=6006): text2 = " not yet supported by this function (tl.ops.open_tb)" if not tl.files.exists_or_mkdir(logdir, verbose=False): - print("[TL] Log reportory was created at %s" % logdir) + logging.info("[TL] Log reportory was created at %s" % logdir) if _platform == "linux" or _platform == "linux2": - print('linux %s' % text2) + logging.info('linux %s' % text2) # TODO elif _platform == "darwin": - print('OS X: %s' % text) + logging.info('OS X: %s' % text) subprocess.Popen( sys.prefix + " | python -m tensorflow.tensorboard --logdir=" + logdir + " --port=" + str(port), shell=True) # open tensorboard in localhost:6006/ or whatever port you chose elif _platform == "win32": - print('Windows%s' % text2) + logging.info('Windows%s' % text2) # TODO else: - print(_platform + text2) + logging.info(_platform + text2) def clear_all(printable=True): @@ -84,7 +84,7 @@ def clear_all(printable=True): printable : boolean If True, print all deleted variables. """ - print('clear all .....................................') + logging.info('clear all .....................................') gl = globals().copy() for var in gl: if var[0] == '_': continue @@ -93,7 +93,7 @@ def clear_all(printable=True): if 'class' in str(globals()[var]): continue if printable: - print(" clear_all ------- %s" % str(globals()[var])) + logging.info(" clear_all ------- %s" % str(globals()[var])) del globals()[var] @@ -106,7 +106,7 @@ def clear_all(printable=True): # ---------- # printable : if True, print all deleted variables. 
# """ -# print('clear all .....................................') +# logging.info('clear all .....................................') # for var in vars: # if var[0] == '_': continue # if 'func' in str(var): continue @@ -114,7 +114,7 @@ def clear_all(printable=True): # if 'class' in str(var): continue # # if printable: -# print(" clear_all ------- %s" % str(var)) +# logging.info(" clear_all ------- %s" % str(var)) # # del var @@ -133,7 +133,7 @@ def set_gpu_fraction(sess=None, gpu_fraction=0.3): ---------- - `TensorFlow using GPU `_ """ - print("[TL]: GPU MEM Fraction %f" % gpu_fraction) + logging.info("[TL]: GPU MEM Fraction %f" % gpu_fraction) gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_fraction) sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) return sess @@ -237,10 +237,10 @@ def get_site_packages_directory(): import site try: loc = site.getsitepackages() - print("[TL] tl.ops : site-packages in ", loc) + logging.info("[TL] tl.ops : site-packages in %s " % loc) return loc except: - print("[TL] tl.ops : Cannot find package dir from virtual environment") + logging.info("[TL] tl.ops : Cannot find package dir from virtual environment") return False @@ -250,13 +250,13 @@ def empty_trash(): """ text = "[TL] Empty the trash" if _platform == "linux" or _platform == "linux2": - print('linux: %s' % text) + logging.info('linux: %s' % text) os.system("rm -rf ~/.local/share/Trash/*") elif _platform == "darwin": - print('OS X: %s' % text) + logging.info('OS X: %s' % text) os.system("sudo rm -rf ~/.Trash/*") elif _platform == "win32": - print('Windows: %s' % text) + logging.info('Windows: %s' % text) try: os.system("rd /s c:\$Recycle.Bin") # Windows 7 or Server 2008 except: @@ -266,7 +266,7 @@ def empty_trash(): except: pass else: - print(_platform) + logging.info(_platform) # diff --git a/tensorlayer/prepro.py b/tensorlayer/prepro.py index 5f53b3fd1..d3bf669b8 100644 --- a/tensorlayer/prepro.py +++ b/tensorlayer/prepro.py @@ -1,4 +1,3 @@ -#! 
/usr/bin/python # -*- coding: utf-8 -*- import numbers @@ -91,7 +90,7 @@ def threading_data(data=None, fn=None, thread_count=None, **kwargs): ## plot function info # for name, value in kwargs.items(): - # print('{0} = {1}'.format(name, value)) + # logging.info('{0} = {1}'.format(name, value)) # exit() # define function for threading def apply_fn(results, i, data, kwargs): @@ -224,7 +223,7 @@ def crop(x, wrg, hrg, is_random=False, row_index=0, col_index=1, channel_index=2 if is_random: h_offset = int(np.random.uniform(0, h - hrg) - 1) w_offset = int(np.random.uniform(0, w - wrg) - 1) - # print(h_offset, w_offset, x[h_offset: hrg+h_offset ,w_offset: wrg+w_offset].shape) + # logging.info(h_offset, w_offset, x[h_offset: hrg+h_offset ,w_offset: wrg+w_offset].shape) return x[h_offset:hrg + h_offset, w_offset:wrg + w_offset] else: # central crop h_offset = int(np.floor((h - hrg) / 2.)) @@ -235,7 +234,7 @@ def crop(x, wrg, hrg, is_random=False, row_index=0, col_index=1, channel_index=2 # old implementation # h_offset = (h - hrg)/2 # w_offset = (w - wrg)/2 - # # print(x[h_offset: h-h_offset ,w_offset: w-w_offset].shape) + # # logging.info(x[h_offset: h-h_offset ,w_offset: w-w_offset].shape) # return x[h_offset: h-h_offset ,w_offset: w-w_offset] # central crop @@ -755,7 +754,7 @@ def elastic_transform_multi(x, alpha, sigma, mode="constant", cval=0, is_random= x_, y_ = np.meshgrid(np.arange(shape[0]), np.arange(shape[1]), indexing='ij') indices = np.reshape(x_ + dx, (-1, 1)), np.reshape(y_ + dy, (-1, 1)) - # print(data.shape) + # logging.info(data.shape) if is_3d: results.append(map_coordinates(data, indices, order=1).reshape((shape[0], shape[1], 1))) else: @@ -795,12 +794,12 @@ def zoom(x, zoom_range=(0.9, 1.1), is_random=False, row_index=0, col_index=1, ch if is_random: if zoom_range[0] == 1 and zoom_range[1] == 1: zx, zy = 1, 1 - print(" random_zoom : not zoom in/out") + logging.info(" random_zoom : not zoom in/out") else: zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2) else: zx, zy = zoom_range - # print(zx, zy) + # logging.info(zx, zy) zoom_matrix = np.array([[zx, 0, 0], [0, zy, 0], [0, 0, 1]]) h, w = x.shape[row_index], x.shape[col_index] @@ -825,7 +824,7 @@ def zoom_multi(x, zoom_range=(0.9, 1.1), is_random=False, row_index=0, col_index if is_random: if zoom_range[0] == 1 and zoom_range[1] == 1: zx, zy = 1, 1 - print(" random_zoom : not zoom in/out") + logging.info(" random_zoom : not zoom in/out") else: zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2) else: @@ -940,7 +939,7 @@ def illumination(x, gamma=1., contrast=1., saturation=1., is_random=False): gamma = 1 im_ = brightness(x, gamma=gamma, gain=1, is_random=False) - # print("using contrast and saturation") + # logging.info("using contrast and saturation") image = Image.fromarray(im_) # array -> PIL contrast_adjust = ImageEnhance.Contrast(image) image = contrast_adjust.enhance(np.random.uniform(contrast[0], contrast[1])) #0.3,0.9)) @@ -1221,11 +1220,11 @@ def get_zca_whitening_principal_components_img(X): Batch of image with dimension of [n_example, row, col, channel] (default). 
""" flatX = np.reshape(X, (X.shape[0], X.shape[1] * X.shape[2] * X.shape[3])) - print("zca : computing sigma ..") + logging.info("zca : computing sigma ..") sigma = np.dot(flatX.T, flatX) / flatX.shape[0] - print("zca : computing U, S and V ..") + logging.info("zca : computing U, S and V ..") U, S, V = linalg.svd(sigma) - print("zca : computing principal components ..") + logging.info("zca : computing principal components ..") principal_components = np.dot(np.dot(U, np.diag(1. / np.sqrt(S + 10e-7))), U.T) return principal_components @@ -1240,10 +1239,10 @@ def zca_whitening(x, principal_components): principal_components : matrix from ``get_zca_whitening_principal_components_img``. """ flatx = np.reshape(x, (x.size)) - # print(principal_components.shape, x.shape) # ((28160, 28160), (160, 176, 1)) + # logging.info(principal_components.shape, x.shape) # ((28160, 28160), (160, 176, 1)) # flatx = np.reshape(x, (x.shape)) # flatx = np.reshape(x, (x.shape[0], )) - # print(flatx.shape) # (160, 176, 1) + # logging.info(flatx.shape) # (160, 176, 1) whitex = np.dot(flatx, principal_components) x = np.reshape(whitex, (x.shape[0], x.shape[1], x.shape[2])) return x @@ -1354,11 +1353,11 @@ def drop(x, keep=0.5): # x = np.asarray([[1,2,3,4,5,6,7,8,9,10],[1,2,3,4,5,6,7,8,9,10]]) # x = np.asarray([x,x,x,x,x,x]) # x.shape = 10, 4, 3 -# # print(x) +# # logging.info(x) # # exit() -# print(x.shape) +# logging.info(x.shape) # # exit() -# print(drop(x, keep=1.)) +# logging.info(drop(x, keep=1.)) # exit() @@ -1514,7 +1513,7 @@ def array_to_img(x, dim_ordering=(0, 1, 2), scale=True): x += max(-np.min(x), 0) x_max = np.max(x) if x_max != 0: - # print(x_max) + # logging.info(x_max) # x /= x_max x = x / x_max x *= 255 @@ -1556,7 +1555,7 @@ def pt2map(list_points=[], size=(100, 100), val=1): return i_m for xx in list_points: for x in xx: - # print(x) + # logging.info(x) i_m[int(np.round(x[0]))][int(np.round(x[1]))] = val return i_m @@ -1676,7 +1675,7 @@ def obj_box_coord_rescale(coord=[], shape=[100, 200]): # coord = obj_box_coord_rescale(coord=[30, 40, 50, 50], shape=[100, 100]) -# print(coord) #[[0.15, 0.4, 0.25, 0.5]] +# logging.info(coord) #[[0.15, 0.4, 0.25, 0.5]] # exit() @@ -1703,13 +1702,13 @@ def obj_box_coord_scale_to_pixelunit(coord, shape=(100, 100, 3)): # coords = obj_box_coords_rescale(coords=[[30, 40, 50, 50], [10, 10, 20, 20]], shape=[100, 100]) -# print(coords) +# logging.info(coords) # # ... [[0.3, 0.4, 0.5, 0.5], [0.1, 0.1, 0.2, 0.2]] # coords = obj_box_coords_rescale(coords=[[30, 40, 50, 50]], shape=[50, 100]) -# print(coords) +# logging.info(coords) # # ... [[0.3, 0.8, 0.5, 1.0]] # coords = obj_box_coords_rescale(coords=[[30, 40, 50, 50]], shape=[100, 200]) -# print(coords) +# logging.info(coords) # # ... [[0.15, 0.4, 0.25, 0.5]] # exit() @@ -1735,7 +1734,7 @@ def obj_box_coord_centroid_to_upleft_butright(coord, to_int=False): # coord = obj_box_coord_centroid_to_upleft_butright([30, 40, 20, 20]) -# print(coord) [20, 30, 40, 50] +# logging.info(coord) [20, 30, 40, 50] # exit() @@ -1870,16 +1869,16 @@ def _flip(im, coords): # im = np.zeros([80, 100]) # as an image with shape width=100, height=80 # im, coords = obj_box_left_right_flip(im, coords=[[0.2, 0.4, 0.3, 0.3], [0.1, 0.5, 0.2, 0.3]], is_rescale=True, is_center=True, is_random=False) -# print(coords) +# logging.info(coords) # # ... 
[[0.8, 0.4, 0.3, 0.3], [0.9, 0.5, 0.2, 0.3]] # im, coords = obj_box_left_right_flip(im, coords=[[0.2, 0.4, 0.3, 0.3]], is_rescale=True, is_center=False, is_random=False) -# print(coords) +# logging.info(coords) # # [[0.5, 0.4, 0.3, 0.3]] # im, coords = obj_box_left_right_flip(im, coords=[[20, 40, 30, 30]], is_rescale=False, is_center=True, is_random=False) -# print(coords) +# logging.info(coords) # # ... [[80, 40, 30, 30]] # im, coords = obj_box_left_right_flip(im, coords=[[20, 40, 30, 30]], is_rescale=False, is_center=False, is_random=False) -# print(coords) +# logging.info(coords) # # [[50, 40, 30, 30]] # exit() @@ -1924,7 +1923,7 @@ def obj_box_imresize(im, coords=[], size=[100, 100], interp='bicubic', mode=None # x' = x * (imw'/imw) x = int(coord[0] * (size[1] / imw)) # y' = y * (imh'/imh) - # print('>>', coord[1], size[0], imh) + # logging.info('>>', coord[1], size[0], imh) y = int(coord[1] * (size[0] / imh)) # w' = w * (imw'/imw) w = int(coord[2] * (size[1] / imw)) @@ -1938,16 +1937,16 @@ def obj_box_imresize(im, coords=[], size=[100, 100], interp='bicubic', mode=None # im = np.zeros([80, 100, 3]) # as an image with shape width=100, height=80 # _, coords = obj_box_imresize(im, coords=[[20, 40, 30, 30], [10, 20, 20, 20]], size=[160, 200], is_rescale=False) -# print(coords) +# logging.info(coords) # # ... [[40, 80, 60, 60], [20, 40, 40, 40]] # _, coords = obj_box_imresize(im, coords=[[20, 40, 30, 30]], size=[40, 100], is_rescale=False) -# print(coords) +# logging.info(coords) # # ... [20, 20, 30, 15] # _, coords = obj_box_imresize(im, coords=[[20, 40, 30, 30]], size=[60, 150], is_rescale=False) -# print(coords) +# logging.info(coords) # # ... [30, 30, 45, 22] # im2, coords = obj_box_imresize(im, coords=[[0.2, 0.4, 0.3, 0.3]], size=[160, 200], is_rescale=True) -# print(coords, im2.shape) +# logging.info(coords, im2.shape) # # ... [0.2, 0.4, 0.3, 0.3] (160, 200, 3) # exit() @@ -2040,11 +2039,11 @@ def _get_coord(coord): h = im_new.shape[0] - y if (w / (h + 1.) > thresh_wh2) or (h / (w + 1.) > thresh_wh2): # object shape strange: too narrow - # print('xx', w, h) + # logging.info('xx', w, h) return None if (w / (im_new.shape[1] * 1.) < thresh_wh) or (h / (im_new.shape[0] * 1.) < thresh_wh): # object shape strange: too narrow - # print('yy', w, im_new.shape[1], h, im_new.shape[0]) + # logging.info('yy', w, im_new.shape[1], h, im_new.shape[0]) return None coord = [x, y, w, h] @@ -2161,11 +2160,11 @@ def _get_coord(coord): h = im_new.shape[0] - y if (w / (h + 1.) > thresh_wh2) or (h / (w + 1.) > thresh_wh2): # object shape strange: too narrow - # print('xx', w, h) + # logging.info('xx', w, h) return None if (w / (im_new.shape[1] * 1.) < thresh_wh) or (h / (im_new.shape[0] * 1.) < thresh_wh): # object shape strange: too narrow - # print('yy', w, im_new.shape[1], h, im_new.shape[0]) + # logging.info('yy', w, im_new.shape[1], h, im_new.shape[0]) return None coord = [x, y, w, h] @@ -2236,12 +2235,12 @@ def obj_box_zoom(im, if is_random: if zoom_range[0] == 1 and zoom_range[1] == 1: zx, zy = 1, 1 - print(" random_zoom : not zoom in/out") + logging.info(" random_zoom : not zoom in/out") else: zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2) else: zx, zy = zoom_range - # print(zx, zy) + # logging.info(zx, zy) zoom_matrix = np.array([[zx, 0, 0], [0, zy, 0], [0, 0, 1]]) h, w = im.shape[row_index], im.shape[col_index] @@ -2286,11 +2285,11 @@ def _get_coord(coord): h = im_new.shape[0] - y if (w / (h + 1.) > thresh_wh2) or (h / (w + 1.) 
> thresh_wh2): # object shape strange: too narrow - # print('xx', w, h) + # logging.info('xx', w, h) return None if (w / (im_new.shape[1] * 1.) < thresh_wh) or (h / (im_new.shape[0] * 1.) < thresh_wh): # object shape strange: too narrow - # print('yy', w, im_new.shape[1], h, im_new.shape[0]) + # logging.info('yy', w, im_new.shape[1], h, im_new.shape[0]) return None coord = [x, y, w, h] @@ -2624,13 +2623,13 @@ def sequences_get_mask(sequences, pad_val=0): # ----------- # - `tensorflow.models.image.cifar10.cifar10_input `_ # """ -# print("This function is deprecated, please use tf.map_fn instead, e.g:\n \ +# logging.info("This function is deprecated, please use tf.map_fn instead, e.g:\n \ # t_image = tf.map_fn(lambda img: tf.image.random_brightness(img, max_delta=32. / 255.), t_image)\n \ # t_image = tf.map_fn(lambda img: tf.image.random_contrast(img, lower=0.5, upper=1.5), t_image)\n \ # t_image = tf.map_fn(lambda img: tf.image.random_saturation(img, lower=0.5, upper=1.5), t_image)\n \ # t_image = tf.map_fn(lambda img: tf.image.random_hue(img, max_delta=0.032), t_image)") # exit() -# # print(" [Warning] distorted_images will be deprecated due to speed, see TFRecord tutorial for more info...") +# # logging.info(" [Warning] distorted_images will be deprecated due to speed, see TFRecord tutorial for more info...") # try: # batch_size = int(images._shape[0]) # except: @@ -2701,13 +2700,13 @@ def sequences_get_mask(sequences, pad_val=0): # ---------------- # - ``tensorflow.models.image.cifar10.cifar10_input`` # """ -# print("This function is deprecated, please use tf.map_fn instead, e.g:\n \ +# logging.info("This function is deprecated, please use tf.map_fn instead, e.g:\n \ # t_image = tf.map_fn(lambda img: tf.image.random_brightness(img, max_delta=32. / 255.), t_image)\n \ # t_image = tf.map_fn(lambda img: tf.image.random_contrast(img, lower=0.5, upper=1.5), t_image)\n \ # t_image = tf.map_fn(lambda img: tf.image.random_saturation(img, lower=0.5, upper=1.5), t_image)\n \ # t_image = tf.map_fn(lambda img: tf.image.random_hue(img, max_delta=0.032), t_image)") # exit() -# # print(" [Warning] crop_central_whiten_images will be deprecated due to speed, see TFRecord tutorial for more info...") +# # logging.info(" [Warning] crop_central_whiten_images will be deprecated due to speed, see TFRecord tutorial for more info...") # try: # batch_size = int(images._shape[0]) # except: diff --git a/tensorlayer/utils.py b/tensorlayer/utils.py index a40404b45..501716bc0 100644 --- a/tensorlayer/utils.py +++ b/tensorlayer/utils.py @@ -1,4 +1,3 @@ -#! /usr/bin/python # -*- coding: utf-8 -*- import math import random @@ -9,6 +8,7 @@ import tensorlayer as tl from . import iterate +from . import _logging as logging def fit(sess, @@ -94,7 +94,7 @@ def fit(sess, assert X_train.shape[0] >= batch_size, "Number of training examples should be bigger than the batch size" if (tensorboard): - print("Setting up tensorboard ...") + logging.info("Setting up tensorboard ...") #Set up tensorboard summaries and saver tl.files.exists_or_mkdir('logs/') @@ -111,7 +111,7 @@ def fit(sess, if (tensorboard_weight_histograms): for param in network.all_params: if hasattr(tf, 'summary') and hasattr(tf.summary, 'histogram'): - print('Param name ', param.name) + logging.info('Param name %s' % param.name) tf.summary.histogram(param.name, param) if hasattr(tf, 'summary') and hasattr(tf.summary, 'histogram'): @@ -121,9 +121,9 @@ def fit(sess, #Initalize all variables and summaries tl.layers.initialize_global_variables(sess) - print("Finished! 
use $tensorboard --logdir=logs/ to start server")
+        logging.info("Finished! use $tensorboard --logdir=logs/ to start server")
 
-    print("Start training the network ...")
+    logging.info("Start training the network ...")
     start_time_begin = time.time()
     tensorboard_train_index, tensorboard_val_index = 0, 0
     for epoch in range(n_epoch):
@@ -158,7 +158,7 @@ def fit(sess,
 
         if epoch + 1 == 1 or (epoch + 1) % print_freq == 0:
             if (X_val is not None) and (y_val is not None):
-                print("Epoch %d of %d took %fs" % (epoch + 1, n_epoch, time.time() - start_time))
+                logging.info("Epoch %d of %d took %fs" % (epoch + 1, n_epoch, time.time() - start_time))
                 if eval_train is True:
                     train_loss, train_acc, n_batch = 0, 0, 0
                     for X_train_a, y_train_a in iterate.minibatches(X_train, y_train, batch_size, shuffle=True):
@@ -172,9 +172,9 @@ def fit(sess,
                             err = sess.run(cost, feed_dict=feed_dict)
                         train_loss += err
                         n_batch += 1
-                    print("   train loss: %f" % (train_loss / n_batch))
+                    logging.info("   train loss: %f" % (train_loss / n_batch))
                     if acc is not None:
-                        print("   train acc: %f" % (train_acc / n_batch))
+                        logging.info("   train acc: %f" % (train_acc / n_batch))
                 val_loss, val_acc, n_batch = 0, 0, 0
                 for X_val_a, y_val_a in iterate.minibatches(X_val, y_val, batch_size, shuffle=True):
                     dp_dict = dict_to_one(network.all_drop)  # disable noise layers
@@ -187,12 +187,12 @@ def fit(sess,
                         err = sess.run(cost, feed_dict=feed_dict)
                     val_loss += err
                     n_batch += 1
-                print("   val loss: %f" % (val_loss / n_batch))
+                logging.info("   val loss: %f" % (val_loss / n_batch))
                 if acc is not None:
-                    print("   val acc: %f" % (val_acc / n_batch))
+                    logging.info("   val acc: %f" % (val_acc / n_batch))
             else:
-                print("Epoch %d of %d took %fs, loss %f" % (epoch + 1, n_epoch, time.time() - start_time, loss_ep))
-    print("Total training time: %fs" % (time.time() - start_time_begin))
+                logging.info("Epoch %d of %d took %fs, loss %f" % (epoch + 1, n_epoch, time.time() - start_time, loss_ep))
+    logging.info("Total training time: %fs" % (time.time() - start_time_begin))
 
 
 def test(sess, network, acc, X_test, y_test, x, y_, batch_size, cost=None):
@@ -226,15 +226,15 @@ def test(sess, network, acc, X_test, y_test, x, y_, batch_size, cost=None):
     >>> see tutorial_mnist_simple.py
     >>> tl.utils.test(sess, network, acc, X_test, y_test, x, y_, batch_size=None, cost=cost)
     """
-    print('Start testing the network ...')
+    logging.info('Start testing the network ...')
     if batch_size is None:
         dp_dict = dict_to_one(network.all_drop)
         feed_dict = {x: X_test, y_: y_test}
         feed_dict.update(dp_dict)
         if cost is not None:
-            print("   test loss: %f" % sess.run(cost, feed_dict=feed_dict))
-        print("   test acc: %f" % sess.run(acc, feed_dict=feed_dict))
-        # print("   test acc: %f" % np.mean(y_test == sess.run(y_op,
+            logging.info("   test loss: %f" % sess.run(cost, feed_dict=feed_dict))
+        logging.info("   test acc: %f" % sess.run(acc, feed_dict=feed_dict))
+        # logging.info("   test acc: %f" % np.mean(y_test == sess.run(y_op,
         #                                          feed_dict=feed_dict)))
     else:
         test_loss, test_acc, n_batch = 0, 0, 0
@@ -250,8 +250,8 @@ def test(sess, network, acc, X_test, y_test, x, y_, batch_size, cost=None):
             test_acc += ac
             n_batch += 1
         if cost is not None:
-            print("   test loss: %f" % (test_loss / n_batch))
-        print("   test acc: %f" % (test_acc / n_batch))
+            logging.info("   test loss: %f" % (test_loss / n_batch))
+        logging.info("   test acc: %f" % (test_acc / n_batch))
 
 
 def predict(sess, network, X, x, y_op, batch_size=None):
@@ -300,7 +300,7 @@ def predict(sess, network, X, x, y_op, batch_size=None):
             if result is None:
                 result = result_a
             else:
-                result = np.vstack((result, result_a)) # TODO: https://github.com/tensorlayer/tensorlayer/issues/288
+                result = np.vstack((result, result_a))  # TODO: https://github.com/tensorlayer/tensorlayer/issues/288
         if result is None:
             if len(X) % batch_size != 0:
                 dp_dict = dict_to_one(network.all_drop)
@@ -318,7 +318,7 @@ def predict(sess, network, X, x, y_op, batch_size=None):
                 }
                 feed_dict.update(dp_dict)
                 result_a = sess.run(y_op, feed_dict=feed_dict)
-                result = np.vstack((result, result_a)) # TODO: https://github.com/tensorlayer/tensorlayer/issues/288
+                result = np.vstack((result, result_a))  # TODO: https://github.com/tensorlayer/tensorlayer/issues/288
         return result
 
 
@@ -347,10 +347,10 @@ def evaluation(y_test=None, y_predict=None, n_classes=None):
     f1 = f1_score(y_test, y_predict, average=None, labels=[x for x in range(n_classes)])
     f1_macro = f1_score(y_test, y_predict, average='macro')
     acc = accuracy_score(y_test, y_predict)
-    print('confusion matrix: \n', c_mat)
-    print('f1-score:', f1)
-    print('f1-score(macro):', f1_macro) # same output with > f1_score(y_true, y_pred, average='macro')
-    print('accuracy-score:', acc)
+    logging.info('confusion matrix: \n%s' % c_mat)
+    logging.info('f1-score : %s' % f1)
+    logging.info('f1-score(macro) : %f' % f1_macro)  # same output with > f1_score(y_true, y_pred, average='macro')
+    logging.info('accuracy-score : %f' % acc)
     return c_mat, f1, acc, f1_macro
 
 
@@ -411,16 +411,16 @@ def class_balancing_oversample(X_train=None, y_train=None, printable=True):
     """
     # ======== Classes balancing
     if printable:
-        print("Classes balancing for training examples...")
+        logging.info("Classes balancing for training examples...")
     from collections import Counter
     c = Counter(y_train)
     if printable:
-        print('the occurrence number of each stage: %s' % c.most_common())
-        print('the least stage is Label %s have %s instances' % c.most_common()[-1])
-        print('the most stage is Label %s have %s instances' % c.most_common(1)[0])
+        logging.info('the occurrence number of each stage: %s' % c.most_common())
+        logging.info('the least stage is Label %s have %s instances' % c.most_common()[-1])
+        logging.info('the most stage is Label %s have %s instances' % c.most_common(1)[0])
     most_num = c.most_common(1)[0][1]
     if printable:
-        print('most num is %d, all classes tend to be this num' % most_num)
+        logging.info('most num is %d, all classes tend to be this num' % most_num)
 
     locations = {}
     number = {}
@@ -429,14 +429,14 @@
         number[lab] = num
         locations[lab] = np.where(np.array(y_train) == lab)[0]
     if printable:
-        print('convert list(np.array) to dict format')
+        logging.info('convert list(np.array) to dict format')
     X = {}  # convert list to dict
     for lab, num in number.items():
         X[lab] = X_train[locations[lab]]
 
     # oversampling
     if printable:
-        print('start oversampling')
+        logging.info('start oversampling')
     for key in X:
         temp = X[key]
         while True:
@@ -444,28 +444,28 @@ def class_balancing_oversample(X_train=None, y_train=None, printable=True):
                 break
             X[key] = np.vstack((X[key], temp))
     if printable:
-        print('first features of label 0 >', len(X[0][0]))
-        print('the occurrence num of each stage after oversampling')
+        logging.info('first features of label 0 > %d' % len(X[0][0]))
+        logging.info('the occurrence num of each stage after oversampling')
         for key in X:
-            print(key, len(X[key]))
+            logging.info("%s %d" % (key, len(X[key])))
 
     if printable:
-        print('make each stage have same num of instances')
+        logging.info('make each stage have same num of instances')
     for key in X:
         X[key] = X[key][0:most_num, :]
-        print(key, len(X[key]))
+        logging.info("%s %d" % (key, len(X[key])))
 
     # convert dict to list
     if printable:
-        print('convert from dict to list format')
+        logging.info('convert from dict to list format')
     y_train = []
     X_train = np.empty(shape=(0, len(X[0][0])))
     for key in X:
         X_train = np.vstack((X_train, X[key]))
         y_train.extend([key for i in range(len(X[key]))])
-    # print(len(X_train), len(y_train))
+    # logging.info(len(X_train), len(y_train))
     c = Counter(y_train)
     if printable:
-        print('the occurrence number of each stage after oversampling: %s' % c.most_common())
+        logging.info('the occurrence number of each stage after oversampling: %s' % c.most_common())
     # ================ End of Classes balancing
     return X_train, y_train
diff --git a/tensorlayer/visualize.py b/tensorlayer/visualize.py
index 4a959c753..684c2e84e 100644
--- a/tensorlayer/visualize.py
+++ b/tensorlayer/visualize.py
@@ -1,4 +1,3 @@
-#! /usr/bin/python
 # -*- coding: utf-8 -*-
 
 import os
@@ -9,6 +8,7 @@
 import scipy.misc
 
 from . import prepro
+from . import _logging as logging
 
 ## use this, if you got the following error:
 # _tkinter.TclError: no display name and no $DISPLAY environment variable
@@ -40,10 +40,10 @@ def read_images(img_list, path='', n_threads=10, printable=True):
     for idx in range(0, len(img_list), n_threads):
         b_imgs_list = img_list[idx:idx + n_threads]
         b_imgs = prepro.threading_data(b_imgs_list, fn=read_image, path=path)
-        # print(b_imgs.shape)
+        # logging.info(b_imgs.shape)
        imgs.extend(b_imgs)
         if printable:
-            print('read %d from %s' % (len(imgs), path))
+            logging.info('read %d from %s' % (len(imgs), path))
     return imgs
 
 
@@ -76,9 +76,9 @@ def save_images(images, size, image_path=''):
     >>> images = np.random.rand(64, 100, 100, 3)
     >>> tl.visualize.save_images(images, [8, 8], 'temp.png')
     """
-    if len(images.shape) == 3: # Greyscale [batch, h, w] --> [batch, h, w, 1]
-        images = images[:,:,:,np.newaxis]
-
+    if len(images.shape) == 3:  # Greyscale [batch, h, w] --> [batch, h, w, 1]
+        images = images[:, :, :, np.newaxis]
+
     def merge(images, size):
         h, w = images.shape[1], images.shape[2]
         img = np.zeros((h * size[0], w * size[1], 3))
@@ -163,7 +163,7 @@ def draw_boxes_and_labels_to_image(image, classes=[], coords=[], scores=[], clas
     # cv2.imwrite('_my.png', image)
     save_image(image, save_name)
     # if len(coords) == 0:
-    #     print("draw_boxes_and_labels_to_image: no bboxes exist, cannot draw !")
+    #     logging.info("draw_boxes_and_labels_to_image: no bboxes exist, cannot draw !")
     return image
 
 
@@ -211,7 +211,7 @@ def W(W=None, second=10, saveable=True, shape=[28, 28], name='mnist', fig_idx=23
             feature = W[:, count - 1] / np.sqrt((W[:, count - 1]**2).sum())
             # feature[feature<0.0001] = 0   # value threshold
             # if count == 1 or count == 2:
-            #     print(np.mean(feature))
+            #     logging.info(np.mean(feature))
             # if np.std(feature) < 0.03:      # condition threshold
             #     feature = np.zeros_like(feature)
             # if np.mean(feature) < -0.015:      # condition threshold
@@ -295,7 +295,7 @@ def CNN2d(CNN=None, second=10, saveable=True, name='cnn', fig_idx=3119362):
     >>> tl.visualize.CNN2d(network.all_params[0].eval(), second=10, saveable=True, name='cnn1_mnist', fig_idx=2012)
     """
     import matplotlib.pyplot as plt
-    # print(CNN.shape)    # (5, 5, 3, 64)
+    # logging.info(CNN.shape)    # (5, 5, 3, 64)
     # exit()
     n_mask = CNN.shape[3]
     n_row = CNN.shape[0]
@@ -311,7 +311,7 @@ def CNN2d(CNN=None, second=10, saveable=True, name='cnn', fig_idx=3119362):
             if count > n_mask:
                 break
             a = fig.add_subplot(col, row, count)
-            # print(CNN[:,:,:,count-1].shape, n_row, n_col)    # (5, 1, 32) 5 5
+            # logging.info(CNN[:,:,:,count-1].shape, n_row, n_col)    # (5, 1, 32) 5 5
             # exit()
             # plt.imshow(
             #         np.reshape(CNN[count-1,:,:,:], (n_row, n_col)),
@@ -356,7 +356,7 @@ def images2d(images=None, second=10, saveable=True, name='images', dtype=None, f
     >>> tl.visualize.images2d(X_train[0:100,:,:,:], second=10, saveable=False, name='cifar10', dtype=np.uint8, fig_idx=20212)
     """
     import matplotlib.pyplot as plt
-    # print(images.shape)    # (50000, 32, 32, 3)
+    # logging.info(images.shape)    # (50000, 32, 32, 3)
     # exit()
     if dtype:
         images = np.asarray(images, dtype=dtype)
@@ -374,7 +374,7 @@
             if count > n_mask:
                 break
             a = fig.add_subplot(col, row, count)
-            # print(images[:,:,:,count-1].shape, n_row, n_col)    # (5, 1, 32) 5 5
+            # logging.info(images[:,:,:,count-1].shape, n_row, n_col)    # (5, 1, 32) 5 5
             # plt.imshow(
             #         np.reshape(images[count-1,:,:,:], (n_row, n_col)),
             #         cmap='gray', interpolation="nearest")    # theano
@@ -453,4 +453,4 @@
         plot_with_labels(low_dim_embs, labels, second=second, saveable=saveable, \
                 name=name, fig_idx=fig_idx)
     except ImportError:
-        print("Please install sklearn and matplotlib to visualize embeddings.")
+        logging.info("Please install sklearn and matplotlib to visualize embeddings.")