From 3ca9ea9a5adf259f101af2dd9fcd49d22eea70d6 Mon Sep 17 00:00:00 2001
From: Anastasia Murzova
Date: Thu, 3 Dec 2020 01:54:48 +0300
Subject: [PATCH] Added data for TF resize_bilinear layer test

Test data for OpenCV PR #18996
---
 testdata/dnn/tensorflow/generate_tf_models.py | 122 ++++++++++++++----
 .../resize_bilinear_align_corners_net.pb | Bin 0 -> 273 bytes
 .../resize_bilinear_align_corners_out.npy | Bin 0 -> 3008 bytes
 ...esize_bilinear_factor_align_corners_net.pb | Bin 0 -> 1710 bytes
 ...size_bilinear_factor_align_corners_out.npy | Bin 0 -> 1568 bytes
 .../resize_bilinear_factor_half_pixel_net.pb | Bin 0 -> 1652 bytes
 .../resize_bilinear_factor_half_pixel_out.npy | Bin 0 -> 1568 bytes
 .../resize_bilinear_half_pixel_net.pb | Bin 0 -> 273 bytes
 .../resize_bilinear_half_pixel_out.npy | Bin 0 -> 3008 bytes
 .../dnn/tensorflow/resize_bilinear_net.pb | Bin 208 -> 263 bytes
 .../dnn/tensorflow/resize_bilinear_out.npy | Bin 2960 -> 3008 bytes
 11 files changed, 95 insertions(+), 27 deletions(-)
 create mode 100644 testdata/dnn/tensorflow/resize_bilinear_align_corners_net.pb
 create mode 100644 testdata/dnn/tensorflow/resize_bilinear_align_corners_out.npy
 create mode 100644 testdata/dnn/tensorflow/resize_bilinear_factor_align_corners_net.pb
 create mode 100644 testdata/dnn/tensorflow/resize_bilinear_factor_align_corners_out.npy
 create mode 100644 testdata/dnn/tensorflow/resize_bilinear_factor_half_pixel_net.pb
 create mode 100644 testdata/dnn/tensorflow/resize_bilinear_factor_half_pixel_out.npy
 create mode 100644 testdata/dnn/tensorflow/resize_bilinear_half_pixel_net.pb
 create mode 100644 testdata/dnn/tensorflow/resize_bilinear_half_pixel_out.npy

diff --git a/testdata/dnn/tensorflow/generate_tf_models.py b/testdata/dnn/tensorflow/generate_tf_models.py
index 662c0f452..87b211e71 100644
--- a/testdata/dnn/tensorflow/generate_tf_models.py
+++ b/testdata/dnn/tensorflow/generate_tf_models.py
@@ -1,27 +1,29 @@
 # This script is used to generate test data for OpenCV deep learning module.
-import numpy as np
-import tensorflow as tf
 import os
-import argparse
 import struct
-import cv2 as cv
+import cv2 as cv
+import numpy as np
+import tensorflow as tf
 from tensorflow.python.tools import optimize_for_inference_lib
 from tensorflow.tools.graph_transforms import TransformGraph
 np.random.seed(2701)
+
 def gen_data(placeholder):
     shape = placeholder.shape.as_list()
     shape[0] = shape[0] if shape[0] else 1  # batch size = 1 instead None
     return np.random.standard_normal(shape).astype(placeholder.dtype.as_numpy_dtype())
+
 def prepare_for_dnn(sess, graph_def, in_node, out_node, out_graph, dtype, optimize=True, quantize=False):
     # Freeze graph. Replaces variables to constants.
     graph_def = tf.graph_util.convert_variables_to_constants(sess, graph_def, [out_node])
     if optimize:
         # Optimize graph. Removes training-only ops, unused nodes.
-        graph_def = optimize_for_inference_lib.optimize_for_inference(graph_def, [in_node], [out_node], dtype.as_datatype_enum)
+        graph_def = optimize_for_inference_lib.optimize_for_inference(graph_def, [in_node], [out_node],
+                                                                      dtype.as_datatype_enum)
         # Fuse constant operations.
transforms = ["fold_constants(ignore_errors=True)"] if quantize: @@ -32,6 +34,7 @@ def prepare_for_dnn(sess, graph_def, in_node, out_node, out_graph, dtype, optimi with tf.gfile.FastGFile(out_graph, 'wb') as f: f.write(graph_def.SerializeToString()) + tf.reset_default_graph() tf.Graph().as_default() tf.set_random_seed(324) @@ -40,6 +43,7 @@ def prepare_for_dnn(sess, graph_def, in_node, out_node, out_graph, dtype, optimi # Use this variable to switch behavior of layers. isTraining = tf.placeholder(tf.bool, name='isTraining') + def writeBlob(data, name): if data.ndim == 4: # NHWC->NCHW @@ -51,6 +55,16 @@ def writeBlob(data, name): # Save raw data. np.save(name + '.npy', data.astype(np.float32)) +def readBlob(data): + if data.ndim == 4: + # NCHW -> NHWC + return data.transpose(0, 2, 3, 1) + elif data.ndim == 5: + # NCDHW -> NDHWC + return data.transpose(0, 2, 3, 4, 1) + else: + return data + def runModel(inpName, outName, name): with tf.Session(graph=tf.Graph()) as localSession: localSession.graph.as_default() @@ -66,12 +80,37 @@ def runModel(inpName, outName, name): writeBlob(inputData, name + '_in') writeBlob(outputData, name + '_out') -def save(inp, out, name, quantize=False, optimize=True): + +def save(inp, out, name, quantize=False, optimize=True, is_gen_data=True): + """ + Parameters: + inp: TF placeholder for appropriate input data generation + out: TF layer output + name: name of the particular test data or of the test group data; + may contain the name of the particular test data within test group: + ex.: name='resize_bilinear' or name=('resize_bilinear', 'align_corners') + quantize: indicates weather it's needed to quantize the weights in graph + optimize: indicates weather it's needed to remove training-only ops, unused nodes + is_gen_data: indicates weather it's needed to generate input data + (use False for test groups to avoid extra data generation) + """ + input_data_name = '{}_in' + + if isinstance(name, tuple): + input_data_name = input_data_name.format(name[0]) + name = '_'.join(name) + else: + input_data_name = input_data_name.format(name) + sess.run(tf.global_variables_initializer()) - inputData = gen_data(inp) + if is_gen_data: + inputData = gen_data(inp) + writeBlob(inputData, input_data_name) + else: + inputData = readBlob(np.load(input_data_name + '.npy')) + outputData = sess.run(out, feed_dict={inp: inputData, isTraining: False}) - writeBlob(inputData, name + '_in') writeBlob(outputData, name + '_out') prepare_for_dnn(sess, sess.graph.as_graph_def(), inp.name[:inp.name.rfind(':')], @@ -95,6 +134,7 @@ def save(inp, out, name, quantize=False, optimize=True): node.attr["value"].tensor.ClearField('half_val') tf.train.write_graph(graph_def, "", name + '_net.pb', as_text=False) + # Test cases ################################################################### # shape: NHWC for dtype, prefix in zip([tf.float32, tf.float16], ['', 'fp16_']): @@ -103,18 +143,18 @@ def save(inp, out, name, quantize=False, optimize=True): activation=tf.nn.relu, bias_initializer=tf.random_normal_initializer()) save(inp, conv, prefix + 'single_conv') -################################################################################ + ################################################################################ inp = tf.placeholder(dtype, [3, 7, 5, 4], 'input') conv = tf.layers.conv2d(inputs=inp, filters=5, kernel_size=[5, 3], padding='SAME', use_bias=False) activation_abs = tf.abs(conv) save(inp, activation_abs, prefix + 'padding_same') 
-################################################################################ + ################################################################################ inp = tf.placeholder(dtype, [2, 4, 6, 5], 'input') conv = tf.layers.conv2d(inputs=inp, filters=4, kernel_size=[3, 5], padding='VALID', activation=tf.nn.elu, bias_initializer=tf.random_normal_initializer()) save(inp, conv, prefix + 'padding_valid') -################################################################################ + ################################################################################ inp = tf.placeholder(dtype, [3, 2, 3, 4], 'input') conv = tf.layers.conv2d(inputs=inp, filters=4, kernel_size=[1, 1], activation=tf.nn.tanh, bias_initializer=tf.random_uniform_initializer(0, 1)) @@ -122,29 +162,29 @@ def save(inp, out, name, quantize=False, optimize=True): bias_initializer=None) eltwise_add_mul = (inp * 0.31 + 2 * conv) * conv2 save(inp, eltwise_add_mul, prefix + 'eltwise_add_mul') -################################################################################ + ################################################################################ inp = tf.placeholder(dtype, [1, 4, 5, 1], 'input') conv = tf.layers.conv2d(inputs=inp, filters=4, kernel_size=[3, 1], padding='VALID') padded = tf.pad(conv, [[0, 0], [0, 2], [0, 0], [0, 0]]) merged = tf.concat([padded, inp], axis=3) save(inp, merged, prefix + 'pad_and_concat') -############################################################################### + ############################################################################### inp = tf.placeholder(dtype, [1, 6, 6, 2], 'input') conv = tf.layers.conv2d(inputs=inp, filters=3, kernel_size=[3, 3], padding='SAME') pool = tf.layers.max_pooling2d(inputs=conv, pool_size=2, strides=2) save(inp, pool, prefix + 'max_pool_even') -################################################################################ + ################################################################################ inp = tf.placeholder(dtype, [1, 7, 7, 2], 'input') conv = tf.layers.conv2d(inputs=inp, filters=3, kernel_size=[3, 3], padding='SAME') pool = tf.layers.max_pooling2d(inputs=conv, pool_size=3, strides=2, padding='VALID') save(inp, pool, prefix + 'max_pool_odd_valid') -################################################################################ + ################################################################################ inp = tf.placeholder(dtype, [1, 7, 7, 2], 'input') conv = tf.layers.conv2d(inputs=inp, filters=3, kernel_size=[3, 3], padding='SAME') relu = tf.nn.relu6(conv * 10) pool = tf.layers.max_pooling2d(inputs=relu, pool_size=2, strides=2, padding='SAME') save(inp, pool, prefix + 'max_pool_odd_same') -################################################################################ + ################################################################################ inp = tf.placeholder(dtype, [1, 5, 6, 2], 'input') deconv_weights = tf.Variable(tf.random_normal([5, 3, 4, 2], dtype=dtype), name='deconv_weights') deconv = tf.nn.conv2d_transpose(value=inp, filter=deconv_weights, @@ -200,10 +240,12 @@ def save(inp, out, name, quantize=False, optimize=True): ################################################################################ from tensorflow.python.framework import function + @function.Defun(tf.float32, func_name='Dropout') def my_dropout(x): return tf.layers.dropout(x, rate=0.1, training=isTraining) + inp = tf.placeholder(tf.float32, [1, 10, 10, 3], 'input') conv = tf.layers.conv2d(inp, filters=3, kernel_size=[1, 
1]) dropout = my_dropout(conv) @@ -239,12 +281,12 @@ def my_dropout(x): ################################################################################ times = 4 # Sequence length (number of batches in different time stamps) batch_size = 2 -input_size = 5*6*3 # W*H*C +input_size = 5 * 6 * 3 # W*H*C output_size = 10 # Define LSTM blobk. inp = tf.placeholder(tf.float32, [times, batch_size, input_size], 'input') lstm_cell = tf.contrib.rnn.LSTMBlockFusedCell(output_size, forget_bias=0.9, - cell_clip=0.4, use_peephole=True) + cell_clip=0.4, use_peephole=True) outputs, state = lstm_cell(inp, dtype=tf.float32) # shape(outputs) is a (times, batch_size, output_size) @@ -338,7 +380,7 @@ def my_dropout(x): bias_initializer=tf.random_normal_initializer()) flattened = tf.reshape(conv, [1, -1], 'reshaped') biases = tf.Variable(tf.random_normal([10]), name='matmul_biases') -weights = tf.Variable(tf.random_normal([2*3*5, 10]), name='matmul_weights') +weights = tf.Variable(tf.random_normal([2 * 3 * 5, 10]), name='matmul_weights') mm = tf.matmul(flattened, weights) + biases save(inp, mm, 'nhwc_reshape_matmul') ################################################################################ @@ -349,7 +391,7 @@ def my_dropout(x): transposed = tf.transpose(conv, [0, 1, 2, 3]) flattened = tf.reshape(transposed, [1, -1], 'reshaped') biases = tf.Variable(tf.random_normal([10]), name='matmul_biases') -weights = tf.Variable(tf.random_normal([2*3*5, 10]), name='matmul_weights') +weights = tf.Variable(tf.random_normal([2 * 3 * 5, 10]), name='matmul_weights') mm = tf.matmul(flattened, weights) + biases save(inp, flattened, 'nhwc_transpose_reshape_matmul') ################################################################################ @@ -457,6 +499,7 @@ def my_dropout(x): save(inp, relu, 'leaky_relu_order3', optimize=False) ################################################################################ from tensorflow import keras as K + model = K.models.Sequential() model.add(K.layers.Softmax(name='keras_softmax', input_shape=(2, 3, 4))) sess = K.backend.get_session() @@ -478,6 +521,7 @@ def my_dropout(x): def keras_relu6(x): return K.activations.relu(x, max_value=6) + inp = K.Input(shape=(2, 3, 4), name='keras_relu6_input') relu = K.layers.Activation(keras_relu6, name='keras_relu6')(inp) model = K.Model(inp, relu) @@ -535,15 +579,36 @@ def keras_relu6(x): 'keras_deconv_same', optimize=True) ################################################################################ inp = tf.placeholder(tf.float32, [2, 3, 4, 5], 'input') -resized = tf.image.resize_bilinear(inp, size=[9, 8], name='resize_bilinear') +resized = tf.image.resize_bilinear(inp, size=[9, 8], align_corners=False, name='resize_bilinear') save(inp, resized, 'resize_bilinear') ################################################################################ +inp = tf.placeholder(tf.float32, [2, 3, 4, 5], 'input') +resized = tf.image.resize_bilinear(inp, size=[9, 8], align_corners=True, name='resize_bilinear') +save(inp, resized, ('resize_bilinear', 'align_corners'), is_gen_data=False) +################################################################################ +inp = tf.placeholder(tf.float32, [2, 3, 4, 5], 'input') +resized = tf.image.resize_bilinear(inp, size=[9, 8], align_corners=False, name='resize_bilinear', + half_pixel_centers=True) +save(inp, resized, ('resize_bilinear', 'half_pixel'), is_gen_data=False) +################################################################################ inp = tf.placeholder(tf.float32, [None, 3, 4, 5], 'input') 
-resized = tf.image.resize_bilinear(inp, size=[tf.shape(inp)[1]*2, tf.shape(inp)[2]*3], +resized = tf.image.resize_bilinear(inp, size=[tf.shape(inp)[1] * 2, tf.shape(inp)[2] * 3], name='resize_bilinear_factor') sub_add = resized - 0.3 + 0.3 save(inp, sub_add, 'resize_bilinear_factor', optimize=False) ################################################################################ +inp = tf.placeholder(tf.float32, [None, 3, 4, 5], 'input') +resized = tf.image.resize_bilinear(inp, size=[tf.shape(inp)[1] * 2, tf.shape(inp)[2] * 3], align_corners=False, + name='resize_bilinear_factor', half_pixel_centers=True) +sub_add = resized - 0.3 + 0.3 +save(inp, sub_add, ('resize_bilinear_factor', 'half_pixel'), optimize=False, is_gen_data=False) +################################################################################ +inp = tf.placeholder(tf.float32, [None, 3, 4, 5], 'input') +resized = tf.image.resize_bilinear(inp, size=[tf.shape(inp)[1] * 2, tf.shape(inp)[2] * 3], align_corners=True, + name='resize_bilinear_factor', half_pixel_centers=False) +sub_add = resized - 0.3 + 0.3 +save(inp, sub_add, ('resize_bilinear_factor', 'align_corners'), optimize=False, is_gen_data=False) +################################################################################ model = K.models.Sequential() model.add(K.layers.SeparableConv2D(filters=4, kernel_size=3, strides=(1, 1), dilation_rate=(2, 3), name='keras_atrous_conv2d_same', @@ -604,7 +669,7 @@ def keras_relu6(x): model = K.models.Sequential() model.add(K.layers.UpSampling2D(size=(3, 2), data_format='channels_last', - name='keras_upsampling2d', input_shape=(2, 3, 4))) + name='keras_upsampling2d', input_shape=(2, 3, 4))) sess = K.backend.get_session() sess.as_default() save(sess.graph.get_tensor_by_name('keras_upsampling2d_input:0'), @@ -641,7 +706,7 @@ def keras_relu6(x): np.save('ssd_mobilenet_v1_ppn_coco.detection_out.npy', detections) ################################################################################ inp = tf.placeholder(tf.float32, [None, 2, 3], 'input') -flatten = tf.reshape(inp, [-1, 2*3], 'planar') +flatten = tf.reshape(inp, [-1, 2 * 3], 'planar') reshaped = tf.reshape(flatten, tf.shape(inp), 'reshape') save(inp, reshaped, 'reshape_as_shape', optimize=False) ################################################################################ @@ -678,10 +743,12 @@ def keras_relu6(x): conv = K.layers.Conv2D(filters=4, kernel_size=1, data_format='channels_last', name='keras_pad_concat_conv', input_shape=(2, 3, 4))(inp) + def pad_depth(x, desired_channels): y = K.backend.random_uniform_variable(x.shape.as_list()[:-1] + [desired_channels], low=0, high=1) return K.layers.concatenate([x, y]) + pad = K.layers.Lambda(pad_depth, arguments={'desired_channels': 5}, name='keras_pad_concat')(conv) sess = K.backend.get_session() @@ -831,7 +898,7 @@ def pad_depth(x, desired_channels): inp = tf.placeholder(tf.float32, [1, 2, 3, 4], 'input') conv = tf.layers.conv2d(inp, filters=5, kernel_size=[1, 1]) flatten = tf.contrib.layers.flatten(conv) -weights = tf.Variable(tf.random_normal([2*3*5, 4]), name='matmul_weights') +weights = tf.Variable(tf.random_normal([2 * 3 * 5, 4]), name='matmul_weights') mm = tf.matmul(flatten, weights) reshape = tf.reshape(mm, [-1, 1, 1, 4], 'reshaped') # NHWC save(inp, reshape, 'matmul_layout') @@ -849,7 +916,8 @@ def pad_depth(x, desired_channels): with tf.gfile.FastGFile('normal_and_abnormal_mnet_v2_96_96_Flatten.pb') as f: graph_def = tf.GraphDef() graph_def.ParseFromString(f.read()) - graph_def = 
optimize_for_inference_lib.optimize_for_inference(graph_def, [inp_node], [out_node], tf.float32.as_datatype_enum) + graph_def = optimize_for_inference_lib.optimize_for_inference(graph_def, [inp_node], [out_node], + tf.float32.as_datatype_enum) tf.import_graph_def(graph_def, name='') @@ -879,7 +947,7 @@ def pad_depth(x, desired_channels): shape_input = tf.shape(inp) hi = shape_input[1] / 3 wi = shape_input[2] / 2 -input_down = tf.image.resize(conv, size=[hi,wi], method=0, name='resize_down') +input_down = tf.image.resize(conv, size=[hi, wi], method=0, name='resize_down') save(inp, input_down, 'resize_bilinear_down') ################################################################################ diff --git a/testdata/dnn/tensorflow/resize_bilinear_align_corners_net.pb b/testdata/dnn/tensorflow/resize_bilinear_align_corners_net.pb new file mode 100644 index 0000000000000000000000000000000000000000..20c506b1b6f2d0be23354cf103130ece9dd76cca GIT binary patch literal 273 zcmZ{fF^d8*5Jt1n;5y>rjUuAhSV@}G)=IRr5wQ<(9o<04F44UIXjhP-jOemPa`=1Otn3!+pRtX7shu86_P_oWt*@mGo$Fl{;jn|J!WPzDom!7N+KmAnUcz3M@ri1K#~wf zrE;1Q`}bWtAq=G%6+(MDPN|es63yfJ7oPQQ-Pd(r*Lrh*?sdf7%gw`Qgdjq&Q{=yX z>$)(JgHR+6uou|~MFCsF!o${ttlkplzy5#v;x(JLt{>L72CfNRKfGJbwh_*;5iYP1 z&bPJ^?h^hVGy3nyi=3)62V{D>n2Eey(6LO{a7qHFU16%Jve)O5b8XVfqOt5 zD~hjXS?gLA;>fdLYc>ESO=GaZ_XS9+GP%6LbS6~HqWpPjta*AP*MDaYs3NL4LE{<6 zG_AY*CyNzqTY5AlhL59yW=$4)T$}Z-{+(^YC(N%bg%vCnur}`kk~Dc!M%z^u67z`N zdhJGP2?5mlw1@c&8S}SxM4?OZ4Y2uY8M_tU#O9rCV57n(Q@?vAy;#>n_BRj%%br6^ znF!u!CyATuH}Eo-^Q^oumfZHWQ<{-F*4`h9?{l>A)%z~EepwE!aq;|XBX=q)tf7JH zN#N5t9)DlH0dtT2jDB{eXgksaHaCwagWE+k{=;OhyGw+QT4!)ZYYwg&6M+`_Z=rPp zv2E{9lF}lD_nA8E$J5y1o{6IL^*GD&A>8lY!8AvO(Ben)S?vl9wB3FZlWR`k8!Imq z=l=;h9^Jf|$eix@9A$y_PvBfpDmsae;ngcH=xTW#1bmQq`~EVfy|9tF6kh}L=ST2e zi5z1W+oNZGDdavdfQrd+e9Y)z+$~gMr|(FKL+4E_i8Do26>$v-pJB_Pb(o|-3Bm`K z;M!J(u?uq0p>iVX?%e|Ydq2VNxg0viPNSk*dSLWoJszJKjMsX{q1RCYOW8*lygC7; zxy6)`GMefZXMok?ap*c@3>uWGH5&NoqzGf8 z-+_L8Dg7YbdXN8J+E4MDC}WSd^&- zOwz3f*TMnr<>m>jIc5iY>SRWR<6_BMHkX>M_VR@v_kz`sFW(=*v(2UT@MCxXA_4xryyQIf0C(P2*ZzZo<2@E?Bo;69w~6D&$rgH26a>{XXw3U9i@G z_|P<%)A$N@mPz zJ$BKT%bAdCTFR|-AI<8m^1(n;j@s|@QL-cgwq9FGhT2WwQ7cz;3LmhJwm8tcr^K5% zeaI4pXg1r5q^6zRm*h5n>DXLWBwGdNs^!>qq8P4l`d=T~X&$=cIIDejnRraG3QKUE#AWbs%X^74(~41ebdWO!90MMGU&otsur! zo_~sG_tbEbJqI0Aw8X5+K%Y)e|AicTsxgu~h2ax|u~~QQaK==x#@cvr#wpnlRWDAHH+HyEg@kj1G0OcLczl8ym5IH z-Cfo|JG<*i7*|hAH~kFT_o~4Eg*8gM*TV{d4%xqrrzzjbNG&^to~%oNGED=FSmB4C zeaE2oqITYVsusOWA7Jv&Pg(ZiY`D;1hl>IZ;B+TnoZWQ}1jda>EW3xCFI7Tb zvKQXW%twoqRQ$_wF$Qeb#4+MYSTx!OXYjTXl~_w+v!fD^$yTH0wM_7PSXF5Nwvh2% zDfBo^leksCLWRa@EbSJeB!7?Cz~whKkgvv;ho^95wPPghdQB*m$6`)iHRRMLGdE^I z>b31mHO-T~J>H8Odhhq6g)Q;LXjvB zZYmK3B3*^@@luvMaEnDaO(7Y7kV%{^_?2z0kbLPJcqHBfmp{%!d15thpshtiM>deC z@(wGiP3FpWzlT@64{nqBVN&o4@ICc}z16#kG7CeA+!-ZnBA=nv{R}2Gy+UfQ3}|}2 z9ELQ{qOtW5rsi2mjs~~l#Eez^+b40ZybK5)qz=|iHQupEyTh>K@iI(Ly@V%oa5ViJC{mVAo7z+~i>fe!e4^Yug0|ea_5% z@N2vqS&N@`YGBd6<)F`B;~Op%u*b`_=zMt-t8lmm`^Fqa$^I_!#LVf8#TT=%VJ2DB z%%hA-6Y_g^f(xJTj0Kjj*!;IzWb)%A5_pR#U{)YKOkYYyv4O0pQ~{qShtu9(wY?+;UCz)L4Py<)s0M8`H^YlnbEPYO{pX_jd^9<6H5Ze zDJyy5$JGYu*k~X=xz2=mxT% ztf59Q%;61NkT(fKceY@+}-m6_u}< zLU;JCcgA282Oxdk%(NFplVJQcK8+TziIIt{E_fZE8FmV!>l#7n?a47%`l^=zsXo$UKFxD~!`Q_K1^p zT7u`YIMA^P`Qk(b8}Op_@+VTVqU=eR~Ep+Lkqz+!cZI~v}FVLM^SCa9x5B! 
zOY1)-F)i&c!1oMty`fh8@}i}z&q+c;S4LWOja2@mhk2c|Mz{+M+f zpUARyUsniz-Y%9KR`I0;0{WmAM4@d{AoLXDLme+LWqnm;Vcb+qubht_3U~HUM@~0p z#$$`qUZ^nEqG&r8aevB5Z0V>*KhFm4yOi_fH~eop*xrS#p>dqE^ zI`}8-q*?X4=yAvpL%b#-CN06TU58Qs;6A*!dM(U*VM-brryx#Kj4?GwanL^l4YnrX sss>N=+9e00^nJ|f!dC9`E`OA&I#Z++r1@1PK$N&HU literal 0 HcmV?d00001 diff --git a/testdata/dnn/tensorflow/resize_bilinear_factor_align_corners_net.pb b/testdata/dnn/tensorflow/resize_bilinear_factor_align_corners_net.pb new file mode 100644 index 0000000000000000000000000000000000000000..1917fb5482f4d6177ec813c9341d0b403e72b442 GIT binary patch literal 1710 zcmb`H-)_?|6vpeMQOfByCZj0)# z#G*h79;&l_nbL5Mo#RC;Q|}=-U#Z9#-XC|Bl{P{ngx15e;T=3`6j3?tl(B9oqw={$ zG`|qsuZ4s6CPk7Hog*=2BF(vzZP{$w7DYu^6*& z0bQ*xs+O=gx_+tbm92|&5=U&M8fU9U9Lib3tnY)7J zH-qKZ!7Bag{FpEX+ct((4{ij*|HROM-Qjn*Xp*{r?5TArete2#AsLHX+Ii8xAl7tPJRkIZ=>kTkQDC3NLBK< F^#_9s%Wwby literal 0 HcmV?d00001 diff --git a/testdata/dnn/tensorflow/resize_bilinear_factor_align_corners_out.npy b/testdata/dnn/tensorflow/resize_bilinear_factor_align_corners_out.npy new file mode 100644 index 0000000000000000000000000000000000000000..914a738e55aac33ea44ebcf714c046f49f6af85e GIT binary patch literal 1568 zcmb8j`#;rp9Ki8I;y9HlijML)-CU0xIoLYq`+lE1=w8THtccKzBFC8sC6-B#l9+T^ zh~}C+bVKL+d3T{)nr_x@Tf-yOmQ<){(%PS}*Ke<9&dRS=c>C)J;slA-p<%HhYU}wD zYvo3{wNzrgFcD=L>^3#fE+I*zn*jVV~Z{Nh)!XN?fH9r+KzgNwVbs zC+gZ(&B=75VQa`0=sj+LzdB0LWpFy?)>+_S(o`%J{|0xBoMEwJAwQv@ocVoGt7J|d zU}IDd^*TbFdfNsESIF^33u1-mG8}$tj^h-2WNh6g%1fQ;PPl9q#HWGQ5!AS{kevvjGsrLyK*Q)ubqsC!$DKq2QrNu=VZlT zT4Xvl#E0T0u@WU(6AWFg1mlQOR=Hy}S)HgN-TO=Vrmq{oQ@99oWAgE1X)UhbauAba z;t_P+G4=ixXnb~%k9}f5*KZ_|@TV}AusRa9PjbXlrP(-OQjL!+vM_g15Js+>exJvzGU~PzY~Y z3SoK9SWuny=U;7Bv(2YlpxwrRSw`8=*xd(7T6~JO9oay#t~MsSeU{6FW^Q(v0lTKD zXWNgMk*TF6H=@o2i);bbKU1LIQ%?*CdkA&?#oS0_2J6n4K*si7bZlb=opmcGDN{h6 zLja5(-3~7L`=PJ@BvdSU0&?%q@IazHj#M;3eCs7%yx4?d?SG(%_od+c-5e0CEV^Kv z>BwpdiGSzN55kGNFgx!Te0iuG)peHGY*x*Oz*3qSX9oi9ZpDU&GuW3pCCrP0S>DNZ zIKM9q_0@Oqg?%k9J1#@jEe~x+vn#y_Jj`UX(^%K-H7q2+mPw9U^G4ZQVVS!=dKa$5 zqy~# z*lHz0Q|BhwI69eK%y*#v`-iFS`6)WQ^CTsiRnq#CWmGia1v|{Dz`^AeMA?dQRpnUh zj7U{_8UDkLPuoH7+;T~C^8mdH&!#Ay{dDm6Y}h6eV40O0-gA#a9}hK#8yn+bj2qMh z_^^)Ug=}zFHXBc=%Yx15Qf)rRtb})RIXZzdqUy>dmv4YIUqF^LL7=Jg1B1FuG?5~ooGKG<;z#1L8R8i=Bb;DHt|76Vs6aE#;${897{c6d@d7q%aaMIouJ?l z+CnKgOIC|sZ~!n8H&VN8p53O!w~bgYFar}hT5f_4__vwr+&N>28_|3RyNhP)5F}vnrI*mJak<_oujYBP@IgqToj0^zk_9U4@Vj?hT-ch!@m<-rv7nzvXNUHZcSbvk~3W11(fH^6)5nT%H$zO$_nRqv%VI N+MLwSMV7L}`wNV{x`F@z literal 0 HcmV?d00001 diff --git a/testdata/dnn/tensorflow/resize_bilinear_factor_half_pixel_out.npy b/testdata/dnn/tensorflow/resize_bilinear_factor_half_pixel_out.npy new file mode 100644 index 0000000000000000000000000000000000000000..7aa73c42cd70df093c516112790c600c2d154e71 GIT binary patch literal 1568 zcmb8jjWg7F90%~Nk+e~iB#zroB4j+KV}IYzcG9FaC)Ch&c~}(5b9;1mT7#7KYSv0p zShYlVY2BfY{e8bgPvTtBlO~~ZQ|E9!oE=A9-`{_5@0r)U=lxa&Y}*nTHj5|aeQOsT zlO{>E^A^|%V?FGg1a`4Wsp+YaiQ!49(J}Pj5Sfq`^YKsH6PXi+n_b7sKu1&F>i(n zG0H@REkj6{VFUkoIf7==4dO0)L*XM9MI7BId=glXsg3LSEY*C;9}Q3Tqc=zGAz9=H zf4z-__}VORu9*iNUR6w1Lo22i*37cv#t}{ zciqHwhoXooSVLY(#yK800H`Y29B@uAMxkf&8Q!>wP_S+Tt9S4r`&+XO>31t8TMqYg zrrPC*>YdVqTBR?D%(6i?9Sk{LwqW@AIP#F2<5R=FWYHZBr`5KQrP8B^kYjBc7zMUK zNaiV6>X?lPF4I1Y3517To@2b+S! 
zk-F^&n^Dk!DT6P%IO^OWv|aKgkVTE~@7V%KiWNbx(F0VKl8Z*S-C&w6a#$*xzZa;r z@=M^Xd6^fKzP)r?2Uv1^fJm6l8#F_a^q!)dVR+co?a?}YtqLc{@CYw z9HC?OGQ=jeiVbnf(-zbhu44@FdsZi9F}3fPTZo#vpM^dAqDfGE2Pxd2On9p`*h(Qo zfgSI(75)`C>-17WnOa*j)ZM#kG%;ZT(hLdISdJlqy94w1SUXPq;7Xb-vq_^@E$8AY zJw!D^9#Y2NL{~cJ!2N(1$j5-7%C-^|+KaHX(~%fXC^_$&$`Pe=GejbRDQoPz9lJF4 zqN%gFP!-w-I|f65*PQQ@))qu4@uyE1YS1>BEj}BI`^#=(W9A%se=P@^v;)v~KOXcd z+Zb7*H=!=BeT%8r7UyyDlso3>y0x#m?9iT{8j#Dcd>BksFwd`aVbLGE2qm3f&oQe0 z6Ys3Hz@ZriN$ z0oIjRlA{$oA`;)mabYJoZ=RI^RUVQDMFkQV-7AEc;b4^hPL26vVMJ2kN7$9F#Jv3z z&h^q+K-sW!LH5)Lp3E`@!M+SOd!aiq6aPr~n)76Ov6_s3qv8Z5>_C+2#dRd#WDFZV zV<2GV0>oI4;*s8bGI2GJ=<0LG?8}i`gILiFZ?p>Vyt5VZ?0JB0Er7{5E70(rIR63pv)ru! literal 0 HcmV?d00001 diff --git a/testdata/dnn/tensorflow/resize_bilinear_half_pixel_net.pb b/testdata/dnn/tensorflow/resize_bilinear_half_pixel_net.pb new file mode 100644 index 0000000000000000000000000000000000000000..4163e363da017ce55a1c3d9fa4d3ccf3efb4dd97 GIT binary patch literal 273 zcmZ{fF>k^!5QQCsMYyO{dmx0USeTHPqKugk9a#`!w@f%1OO_Kkp=vwu+cANm66){` zPp9|ZJy=4|xa!oX6qAkBITfWnke3OFL-SJ+@bVenmDd=~c^V{zbbQwPrQ`VfZC+ctl7kDv7K uA7_inKh<&!SZI4xmH8&Ca&is%8u>huJ&1Pgr~y1_Yd)RIOYd%1hU@}dBu6v= literal 0 HcmV?d00001 diff --git a/testdata/dnn/tensorflow/resize_bilinear_half_pixel_out.npy b/testdata/dnn/tensorflow/resize_bilinear_half_pixel_out.npy new file mode 100644 index 0000000000000000000000000000000000000000..ed64949a9a5f01f29c9e88cbd6d3fe31e0809825 GIT binary patch literal 3008 zcmbW({XfyJ%=z85<0o*xSN`+a`U;aqp$81J8LN_MK>svVZJ8a9HfRQ_5BOZ^U)auF|I1hS1&8nYkcS8Qh3@*WTiDr%KC=-y z*oc-|+K3K_{*Mv9h`z!_Y}pRrJ_g&nj4|M@0Y0m&1~1=6e&*RCrp)7lrgD?bIVi{- z1t1%op39*`Zw%5?Q~9VD*H{!+s5sfH2P!?oV36yG7ejn;y}}p^4v6_*O&N<>pTSFl zySWgjx9}!v73QCn;q(y1!=b3~@V~>lWFsu|VIrGsYz`*r_0S|~1m~fBaDJ$T4}O}& z*4^-@xrfBm>S)FULs5#hk!`H`w-lB=v5M(t8?&by3aRRDJhe2uVSCJ4m^9Cm47=QD z;g6@7_xLPcnLqd@21Q?)V*ivqd}!HymJxE7#ZN4w3cXtTV@Dq;^Wr3HEFILqowCay zeQE(Bhvl4^>U>$!94HM3xU$GaCPfCXNUwYY& zKu?-;L&l1XKY^LIWfJC)qS@LK6M2H-IHcv(Q zrfjSZ7vl8sBFOU8Mw3aO;_0>%^yZ?NI-g|$J8XyV(u9~|Isk5?!%)~g25gG3HhftV z-3*;Z)}O8hu0n`Hb3+VReIK+MGC^hWCCYU@P6>5(w0T7c6K7|F&PPwc@moz02*MrK zr@dy0$Mwlg*O#LDEa==`56-GA2(Am>^3vo?7ILit1gY2gqt_FtWJMAUkFI1@!)Dx& zYaKh7qeseI$Jm&gnR*x=YJY+UEVQw(CXNptFrrG6B&u9eM9SP>XbSQ+UywOofpuEX zp|*Dn?%4mul@xwTmTk#&VMQt!oRWZ`?UYl|gLIHrB;Xmg2|M)UZ0Ue6eXMdHW*a+$^n^2n6h`tF5Zl0Y5iSn(TCC=IW1&{q~(H3=ZU;8ck=KmXGY?rWeu@V|^ zzsY**b6MjqQ&w;PmMeL#f|4y|pd)?BM31~Ea?F)>k#))<$+Hp0|qCzaF+zN4J3+dAoI>3m<>OIPOmja*WQ6ajUM*aWgD&fd?WRFeo0kN zvY&Hof4A}dHhH`c)xhpDQ|v3uMB!i#3IjOQnx}^5+VikX zXoJe!ieD%xKB_PI!l4;6jt1e>ngr+^HsZP#$3X7tYoN^kqpvS%Pn;yFeqM+yW;V7j z6EgYPI;K1MBeu}+szU$u1R5GXMzdHM-upEL`VXp*#UXd9Pnkg`pER)j=k8;y$t5&$ zw?~y!OMZUHUXo&K^EZU~Y$YFqz^Ns9#=* z>+)~lR+S1|r#T->zP!Oz)}*oNQ~zSF-v@$yx)8aFbQB&d!1tLxsQpDL#k!wH^)6HKI z*0HLBIh50UlKoMli*-(M*b|w}g42Iskw1^H+bE_jO`ECvq&`KpszUAHdw6FMK?7mI z)NbNKi${a$(AaT`b_yh~xB_-6t69<9@g3{QkuwwBcoyATzu?9=z05@o&x^-1_&{+?^%|gY!$EDj=4B(&fzBf}B{VRUuE3L2hNu zHmGv(hP0cHxCQi@k7@XYeU{=wT6&AfMlFgHXTJcxZ;ZRA_QH8xk_D?jv5=}nKaub0 zLK1j5IC&OZqB3`1V^Z6;@i~dU>*dd7s&PdX1vFsQKz90GR53Wjl)35U%VhL2f<$@} z=4;qq(>nTyt+^_umivyR=i^Gg*v{m@Wyi&unO9I+BE%h#lbD>Ek)M;2TBOCz#hOx5S&%BkWWcB;&Ba=rkqG1n zSqTU+aWDZfGZ3=?G3!KSAI6G_1z{=@TtXR%Icf0)nH8xy@yV%qC8Ek)M;2TBOCz#hOx5S&%BkWWYGl!G|$!Vs981 MI~QXJNI-}I0PT$q&Hw-a diff --git a/testdata/dnn/tensorflow/resize_bilinear_out.npy b/testdata/dnn/tensorflow/resize_bilinear_out.npy index c07383032f9b3253f91161a269fb02c4e3c57386..9773798e3dd550a7516fe395a56962ae73a9fd3c 100644 GIT binary patch delta 953 
zcmb8e?N3uz901^3w59C=RLtmB8d`Lf7h9l00fl>hL|J3Ti!cP+pd`vPq;VaJgT~Dd zKx9^6ZF3sj#IXiNkY!OYh&t{$Wvn5@#C3>UVTOnxLt{3W8|#*JTWb zT3J$N7BG!2iBYTmA6!L-O%|yUsxwWZn2@B)tu_N0t`HNn-^RZ@QZXft_Yk;ApXD0l-OKpG z8|xxk^`Bgv_h+!IbG7L`J5GU~9H#c-A?y`y?ZZ{?=fey;=H9F8^y^rH-dRa_%8KPcda)(7L?Pa#iB0p+iid`+gCtfZs=23Pn^KgynEp$s40*sYO` zSm|j?1zl3*hI{@fSwY5lu3zrfIPSsPI+uaEIxd-e~g_k987mUNIQB8a)Ep8nRBO4Yh4 zqQ)hn^pu&+)0`|TiaLS{8qFn#XVjhNWXvKPt8gRT-wsqb-;Qj!Pt>kwu3v(aCLI1;;g4{$a9K?qsb6$^L=0KQghu?ophOx^Z z=igaVQ9gr*B0o>m?j1qr-qW+iFV2yeln{9~oJ)7vcFAaOAgSb5AV&^Zk#=K=TfkUuC<5yo-Y$mU{B7tl|*BNrek-pzgAdcg@D+Oi)Jv^KtT3wsNnK!jHJND*5meUmSN5w!Yv0(G_hgpQm#!yTP<65;-# z!*r(QCV9TMpUZOm$$z5wlDvSmj4PvR-~qbX`4)2TnByj`JFXi7<<#Pt4qdMsM1Q=h zgo-sI9CpXUZ+Mw0O1XAzjIXIRP+{CqBe`!@K(BuYZus8^O;{A*DK$6z#i&YS4V7Sy77sK_C%c}5;pI~B*ouMg*|!~y2p-lGv8h1&m^bAQ_QZ@XfwOh3J$Jr zC-0T@lIdTR(4RgIBey3}*Xcqkd{R70rfe4C^nT0l`N4~FXF1r9KV8?JBpbBQQ|2-n zQ00eS{3`(-&&7k5DB)R*m*Ay~RE7aq(BK3%5M_7ZUe<4W(uc$d%#5Xgf|6< zcI*@%y>kO~ttsh7ULBfNdybC@3vjVY4@&>Xg^K2#s2=+woh);u1FR?r&lWQaYqKk$ zp}mO~e;-bn*VYPz+d2<`rePesK3QRYq6JLYEza92TjW8M>VTCyYT$c5E*M@`QDHV- z5Py-F@yZ}NdyM6ZfB%TcQi9~s)qMK0BMyz}>>zxIyP_1L(Wn2RQOrke*WQ}HM8rlH z`QWQ}$bk4~65AO=pPx8OdKB#B7r`Xyl)tFOc>C>SPeIgH%Z!)DpH$K$&8o= z=KDh3zSHE+p$jB=wV!y(+PEjGAZo@wiH-wa=_)w&G4?G6<(_3k^P%-V#ihMYyJm@r<_m#