
Test in eager mode #695


Merged
merged 8 commits on Nov 30, 2019
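All of the diffs below apply one pattern: the per-method @test_utils.run_in_graph_and_eager_modes decorators are dropped in favor of a single class-level @test_utils.run_all_in_graph_and_eager_modes, and eager-only calls are swapped for equivalents that also work in graph mode. A minimal sketch of the resulting test shape, assuming the test_utils module from this repository (ExampleTest and test_add are illustrative names, not part of this PR):

import tensorflow as tf

from tensorflow_addons.utils import test_utils


# The class-level decorator runs every test method twice:
# once eagerly and once inside a graph.
@test_utils.run_all_in_graph_and_eager_modes
class ExampleTest(tf.test.TestCase):
    def test_add(self):
        result = tf.constant([1.0, 2.0]) + tf.constant([3.0, 4.0])
        # self.evaluate() returns a NumPy value in both modes;
        # result.numpy() would fail on a symbolic graph tensor.
        self.assertAllEqual([4.0, 6.0], self.evaluate(result))


if __name__ == '__main__':
    tf.test.main()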
11 changes: 2 additions & 9 deletions tensorflow_addons/image/transform_ops_test.py
@@ -30,8 +30,8 @@
 ])


+@test_utils.run_all_in_graph_and_eager_modes
 class ImageOpsTest(tf.test.TestCase):
-    @test_utils.run_in_graph_and_eager_modes
     def test_compose(self):
         for dtype in _DTYPES:
             with test_utils.use_gpu():
@@ -52,7 +52,6 @@ def test_compose(self):
             [[0, 0, 0, 0], [0, 1, 0, 1], [0, 1, 0, 1], [0, 1, 1, 1]],
             image_transformed)

-    @test_utils.run_in_graph_and_eager_modes
     def test_extreme_projective_transform(self):
         for dtype in _DTYPES:
             with test_utils.use_gpu():
@@ -70,12 +69,9 @@ def test_extreme_projective_transform(self):
     def test_transform_static_output_shape(self):
         image = tf.constant([[1., 2.], [3., 4.]])
         result = transform_ops.transform(
-            image,
-            tf.random.uniform([8], -1, 1),
-            output_shape=tf.constant([3, 5]))
+            image, tf.random.uniform([8], -1, 1), output_shape=[3, 5])
         self.assertAllEqual([3, 5], result.shape)

-    @test_utils.run_in_graph_and_eager_modes
     def test_transform_unknown_shape(self):
         fn = transform_ops.transform.get_concrete_function(
             tf.TensorSpec(shape=None, dtype=tf.float32),
@@ -113,7 +109,6 @@ def transform_fn(x):

         self.assertAllClose(theoretical[0], numerical[0])

-    @test_utils.run_in_graph_and_eager_modes
     def test_grad(self):
         self._test_grad([16, 16])
         self._test_grad([4, 12, 12])
@@ -122,15 +117,13 @@ def test_grad(self):
         self._test_grad([4, 12, 3], [8, 24, 3])
         self._test_grad([3, 4, 12, 3], [3, 8, 24, 3])

-    @test_utils.run_in_graph_and_eager_modes
     def test_transform_data_types(self):
         for dtype in _DTYPES:
             image = tf.constant([[1, 2], [3, 4]], dtype=dtype)
             self.assertAllEqual(
                 np.array([[4, 4], [4, 4]]).astype(dtype.as_numpy_dtype()),
                 transform_ops.transform(image, [1] * 8))

-    @test_utils.run_in_graph_and_eager_modes
     def test_transform_eager(self):
         image = tf.constant([[1., 2.], [3., 4.]])
         self.assertAllEqual(
2 changes: 1 addition & 1 deletion tensorflow_addons/layers/BUILD
@@ -77,7 +77,7 @@ py_test(
 )

 py_test(
-    name = "layers_normalizations_test",
+    name = "normalizations_test",
     size = "small",
     srcs = [
         "normalizations_test.py",
32 changes: 12 additions & 20 deletions tensorflow_addons/layers/normalizations_test.py
@@ -24,6 +24,7 @@
 from tensorflow_addons.utils import test_utils


+@test_utils.run_all_in_graph_and_eager_modes
 class NormalizationTest(tf.test.TestCase):

     # ------------Tests to ensure proper inheritance. If these suceed you can
@@ -50,7 +51,8 @@ def run_reshape_test(axis, group, input_shape, expected_shape):
             reshaped_inputs, group_shape = group_layer._reshape_into_groups(
                 inputs, (10, 10, 10), tensor_input_shape)
             for i in range(len(expected_shape)):
-                self.assertEqual(int(group_shape[i]), expected_shape[i])
+                self.assertEqual(
+                    self.evaluate(group_shape[i]), expected_shape[i])

         input_shape = (10, 10, 10)
         expected_shape = [10, 5, 10, 2]
@@ -99,13 +101,13 @@ def _test_specific_layer(self, inputs, axis, groups, center, scale):
             axis=axis, groups=groups, center=center, scale=scale)
         model = tf.keras.models.Sequential()
         model.add(layer)
-        outputs = model.predict(inputs)
+        outputs = model.predict(inputs, steps=1)
         self.assertFalse(np.isnan(outputs).any())

         # Create shapes
         if groups is -1:
             groups = input_shape[axis]
-        np_inputs = inputs.numpy()
+        np_inputs = self.evaluate(inputs)
         reshaped_dims = list(np_inputs.shape)
         reshaped_dims[axis] = reshaped_dims[axis] // groups
         reshaped_dims.insert(1, groups)
@@ -134,8 +136,9 @@ def _test_specific_layer(self, inputs, axis, groups, center, scale):
         output_test = gamma * zeroed * rsqrt + beta

         # compare outputs
-        output_test = np.reshape(output_test, input_shape.as_list())
-        self.assertAlmostEqual(np.mean(output_test - outputs), 0, places=7)
+        output_test = tf.reshape(output_test, input_shape)
+        self.assertAlmostEqual(
+            self.evaluate(tf.reduce_mean(output_test - outputs)), 0, places=7)

     def _create_and_fit_Sequential_model(self, layer, shape):
         # Helperfunction for quick evaluation
@@ -153,7 +156,6 @@ def _create_and_fit_Sequential_model(self, layer, shape):
         model.fit(x=input_batch, y=output_batch, epochs=1, batch_size=1)
         return model

-    @test_utils.run_in_graph_and_eager_modes
     def test_weights(self):
         # Check if weights get initialized correctly
         layer = GroupNormalization(groups=1, scale=False, center=False)
@@ -167,24 +169,22 @@ def _create_and_fit_Sequential_model(self, layer, shape):
         self.assertEqual(len(layer.weights), 2)

     def test_apply_normalization(self):
-
         input_shape = (1, 4)
         expected_shape = (1, 2, 2)
         reshaped_inputs = tf.constant([[[2.0, 2.0], [3.0, 3.0]]])
         layer = GroupNormalization(groups=2, axis=1, scale=False, center=False)
         normalized_input = layer._apply_normalization(reshaped_inputs,
                                                       input_shape)
         self.assertTrue(
-            tf.reduce_all(
-                tf.equal(normalized_input,
-                         tf.constant([[[0.0, 0.0], [0.0, 0.0]]]))))
+            np.all(
+                np.equal(
+                    self.evaluate(normalized_input),
+                    np.array([[[0.0, 0.0], [0.0, 0.0]]]))))

     def test_axis_error(self):
-
         with self.assertRaises(ValueError):
             GroupNormalization(axis=0)

-    @test_utils.run_in_graph_and_eager_modes
     def test_groupnorm_flat(self):
         # Check basic usage of groupnorm_flat
         # Testing for 1 == LayerNorm, 16 == GroupNorm, -1 == InstanceNorm
@@ -197,19 +197,15 @@ def test_groupnorm_flat(self):
         self.assertTrue(hasattr(model.layers[0], 'gamma'))
         self.assertTrue(hasattr(model.layers[0], 'beta'))

-    @test_utils.run_in_graph_and_eager_modes
     def test_instancenorm_flat(self):
         # Check basic usage of instancenorm
-
         model = self._create_and_fit_Sequential_model(InstanceNormalization(),
                                                       (64,))
         self.assertTrue(hasattr(model.layers[0], 'gamma'))
         self.assertTrue(hasattr(model.layers[0], 'beta'))

-    @test_utils.run_in_graph_and_eager_modes
     def test_initializer(self):
         # Check if the initializer for gamma and beta is working correctly
-
         layer = GroupNormalization(
             groups=32,
             beta_initializer='random_normal',
@@ -223,9 +219,7 @@ def test_initializer(self):
         negativ = weights[weights < 0.0]
         self.assertTrue(len(negativ) == 0)

-    @test_utils.run_in_graph_and_eager_modes
     def test_regularizations(self):
-
         layer = GroupNormalization(
             gamma_regularizer='l1', beta_regularizer='l1', groups=4, axis=2)
         layer.build((None, 4, 4))
@@ -237,11 +231,9 @@ def test_regularizations(self):
         self.assertEqual(layer.gamma.constraint, max_norm)
         self.assertEqual(layer.beta.constraint, max_norm)

-    @test_utils.run_in_graph_and_eager_modes
     def test_groupnorm_conv(self):
         # Check if Axis is working for CONV nets
         # Testing for 1 == LayerNorm, 5 == GroupNorm, -1 == InstanceNorm
-
         groups = [-1, 5, 1]
         for i in groups:
             model = tf.keras.models.Sequential()
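The normalizations changes swap eager-only idioms for mode-agnostic ones: inputs.numpy() becomes self.evaluate(inputs), int(group_shape[i]) becomes self.evaluate(group_shape[i]), and model.predict(inputs) gains steps=1, since graph-mode predict on a tensor input needs an explicit step count. A small sketch of the self.evaluate() distinction (the test class and names are illustrative, not from this PR):

import tensorflow as tf


class EvaluateSketchTest(tf.test.TestCase):
    def test_mean(self):
        x = tf.constant([[2.0, 2.0], [3.0, 3.0]])
        mean = tf.reduce_mean(x)
        # Eagerly, `mean` is a concrete value; in graph mode it is a
        # symbolic tensor with no .numpy(). self.evaluate() returns a
        # NumPy value in both cases, so the assertion works either way.
        self.assertAlmostEqual(self.evaluate(mean), 2.5)


if __name__ == '__main__':
    tf.test.main()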
14 changes: 8 additions & 6 deletions tensorflow_addons/metrics/r_square_test.py
@@ -20,8 +20,10 @@

 import tensorflow as tf
 from tensorflow_addons.metrics import RSquare
+from tensorflow_addons.utils import test_utils


+@test_utils.run_all_in_graph_and_eager_modes
 class RSquareTest(tf.test.TestCase):
     def test_config(self):
         r2_obj = RSquare(name='r_square')
@@ -47,8 +49,8 @@ def check_results(self, obj, value):
     def test_r2_perfect_score(self):
         actuals = tf.constant([100, 700, 40, 5.7], dtype=tf.float32)
         preds = tf.constant([100, 700, 40, 5.7], dtype=tf.float32)
-        actuals = tf.constant(actuals, dtype=tf.float32)
-        preds = tf.constant(preds, dtype=tf.float32)
+        actuals = tf.cast(actuals, dtype=tf.float32)
+        preds = tf.cast(preds, dtype=tf.float32)
         # Initialize
         r2_obj = self.initialize_vars()
         # Update
@@ -59,8 +61,8 @@ def test_r2_perfect_score(self):
     def test_r2_worst_score(self):
         actuals = tf.constant([10, 600, 4, 9.77], dtype=tf.float32)
         preds = tf.constant([1, 70, 40, 5.7], dtype=tf.float32)
-        actuals = tf.constant(actuals, dtype=tf.float32)
-        preds = tf.constant(preds, dtype=tf.float32)
+        actuals = tf.cast(actuals, dtype=tf.float32)
+        preds = tf.cast(preds, dtype=tf.float32)
         # Initialize
         r2_obj = self.initialize_vars()
         # Update
@@ -71,8 +73,8 @@ def test_r2_worst_score(self):
     def test_r2_random_score(self):
         actuals = tf.constant([10, 600, 3, 9.77], dtype=tf.float32)
         preds = tf.constant([1, 340, 40, 5.7], dtype=tf.float32)
-        actuals = tf.constant(actuals, dtype=tf.float32)
-        preds = tf.constant(preds, dtype=tf.float32)
+        actuals = tf.cast(actuals, dtype=tf.float32)
+        preds = tf.cast(preds, dtype=tf.float32)
         # Initialize
         r2_obj = self.initialize_vars()
         # Update
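The r_square changes replace tf.constant(tensor, dtype=...) with tf.cast(tensor, dtype=...). tf.constant builds a new constant from a Python or NumPy value and rejects symbolic tensors in graph mode, while tf.cast only converts the dtype of whatever tensor it is handed, so it works in both modes. A short sketch under that assumption:

import tensorflow as tf

actuals = tf.constant([100, 700, 40, 5.7], dtype=tf.float32)

# tf.constant(actuals, ...) fails when `actuals` is a symbolic graph
# tensor; tf.cast(actuals, ...) accepts a tensor in either mode.
actuals = tf.cast(actuals, dtype=tf.float32)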
11 changes: 1 addition & 10 deletions tensorflow_addons/optimizers/conditional_gradient_test.py
@@ -25,6 +25,7 @@
 import conditional_gradient as cg_lib


+@test_utils.run_all_in_graph_and_eager_modes
 class ConditionalGradientTest(tf.test.TestCase):
     def _update_conditional_gradient_numpy(self, var, norm, g, lr, lambda_):
         var = var * lr - (1 - lr) * lambda_ * g / norm
@@ -104,20 +105,16 @@ def doTestBasic(self, use_resource=False, use_callable_params=False):
                 - (1 - 0.5) * 0.01 * 0.01 / norm1]),
             self.evaluate(var1))

-    @test_utils.run_in_graph_and_eager_modes(reset_test=True)
     def testBasic(self):
         with self.cached_session():
             self.doTestBasic(use_resource=False)

-    @test_utils.run_in_graph_and_eager_modes(reset_test=True)
     def testResourceBasic(self):
         self.doTestBasic(use_resource=True)

-    @test_utils.run_in_graph_and_eager_modes(reset_test=True)
     def testBasicCallableParams(self):
         self.doTestBasic(use_resource=True, use_callable_params=True)

-    @test_utils.run_in_graph_and_eager_modes(reset_test=True)
     def testVariablesAcrossGraphs(self):
         optimizer = cg_lib.ConditionalGradient(0.01, 0.5)
         with tf.Graph().as_default():
@@ -148,7 +145,6 @@ def _DtypesToTest(self, use_gpu):
         else:
             return [tf.half, tf.float32, tf.float64]

-    @test_utils.run_in_graph_and_eager_modes(reset_test=True)
     def testMinimizeSparseResourceVariable(self):
         # This test invokes the ResourceSparseApplyConditionalGradient
         # operation. And it will call the 'ResourceScatterUpdate' OpKernel
@@ -194,7 +190,6 @@ def loss():
                 (1 - learning_rate) * lambda_ * grads0_1 / norm0
             ]], self.evaluate(var0))

-    @test_utils.run_in_graph_and_eager_modes(reset_test=True)
     def testMinimizeWith2DIndiciesForEmbeddingLookup(self):
         # This test invokes the ResourceSparseApplyConditionalGradient
         # operation.
@@ -224,7 +219,6 @@ def loss():
             learning_rate * 1 - (1 - learning_rate) * lambda_ * 1 / norm0
         ]], self.evaluate(var0))

-    @test_utils.run_in_graph_and_eager_modes(reset_test=True)
     def testTensorLearningRateAndConditionalGradient(self):
         for dtype in [tf.half, tf.float32, tf.float64]:
             with self.cached_session():
@@ -397,7 +391,6 @@ def _dbParamsCG01(self):
         # pylint: enable=line-too-long
         return db_grad, db_out

-    @test_utils.run_in_graph_and_eager_modes(reset_test=True)
     def testLikeDistBeliefCG01(self):
         with self.cached_session():
             db_grad, db_out = self._dbParamsCG01()
@@ -417,7 +410,6 @@ def testLikeDistBeliefCG01(self):
                 cg_update.run(feed_dict={grads0: db_grad[i]})
                 self.assertAllClose(np.array(db_out[i]), self.evaluate(var0))

-    @test_utils.run_in_graph_and_eager_modes(reset_test=True)
     def testSparse(self):
         # TODO:
         # To address the issue #347.
@@ -516,7 +508,6 @@ def testSparse(self):
                 (1 - learning_rate) * lambda_ * 0.01 / norm1]),
             self.evaluate(var1)[2])

-    @test_utils.run_in_graph_and_eager_modes(reset_test=True)
     def testSharing(self):
         for dtype in [tf.half, tf.float32, tf.float64]:
             with self.cached_session():
5 changes: 1 addition & 4 deletions tensorflow_addons/optimizers/cyclical_learning_rate_test.py
@@ -37,9 +37,9 @@ def _maybe_serialized(lr_decay, serialize_and_deserialize):
     return lr_decay


+@test_utils.run_all_in_graph_and_eager_modes
 @parameterized.named_parameters(("NotSerialized", False), ("Serialized", True))
 class CyclicalLearningRateTest(tf.test.TestCase, parameterized.TestCase):
-    @test_utils.run_in_graph_and_eager_modes(reset_test=True)
     def testTriangularCyclicalLearningRate(self, serialize):
         initial_learning_rate = 0.1
         maximal_learning_rate = 1
@@ -68,7 +68,6 @@ def testTriangularCyclicalLearningRate(self, serialize):
                 1e-6)
             self.evaluate(step.assign_add(1))

-    @test_utils.run_in_graph_and_eager_modes(reset_test=True)
     def testTriangular2CyclicalLearningRate(self, serialize):
         initial_learning_rate = 0.1
         maximal_learning_rate = 1
@@ -103,7 +102,6 @@ def testTriangular2CyclicalLearningRate(self, serialize):
                 1e-6)
             self.evaluate(step.assign_add(1))

-    @test_utils.run_in_graph_and_eager_modes(reset_test=True)
     def testExponentialCyclicalLearningRate(self, serialize):
         initial_learning_rate = 0.1
         maximal_learning_rate = 1
@@ -133,7 +131,6 @@ def testExponentialCyclicalLearningRate(self, serialize):
                 self.evaluate(exponential_cyclical_lr(step)), expected, 1e-6)
             self.evaluate(step.assign_add(1))

-    @test_utils.run_in_graph_and_eager_modes(reset_test=True)
     def testCustomCyclicalLearningRate(self, serialize):
         initial_learning_rate = 0.1
         maximal_learning_rate = 1