|
6 | 6 | from sklearn.datasets import fetch_openml
|
7 | 7 | from sklearn.model_selection import train_test_split
|
8 | 8 | from sklearn.preprocessing import LabelEncoder, StandardScaler
|
9 |
| -from tensorflow.keras.models import Sequential, model_from_json |
| 9 | +from tensorflow.keras.models import Sequential, Model, model_from_json |
10 | 10 | from tensorflow.keras.optimizers import Adam
|
11 | 11 | from tensorflow.keras.regularizers import l1
|
12 |
| -from tensorflow.keras.layers import Activation, BatchNormalization |
| 12 | +from tensorflow.keras.layers import Activation, BatchNormalization, Input |
13 | 13 | from qkeras.qlayers import QDense, QActivation
|
14 | 14 | from qkeras.quantizers import quantized_bits, quantized_relu, ternary, binary
|
15 | 15 | from qkeras.utils import _add_supported_quantized_objects; co = {}; _add_supported_quantized_objects(co)
|
@@ -228,3 +228,83 @@ def test_quantizer(randX_1000_1, quantizer, backend):
|
228 | 228 | y_hls4ml = hls_model.predict(X)
|
229 | 229 | # Goal is to get it passing with all equal
|
230 | 230 | np.testing.assert_array_equal(y_qkeras, y_hls4ml)
|
| 231 | + |
| 232 | + |
@pytest.mark.parametrize(
    'weight_quantizer,activation_quantizer', [
        ('binary', 'binary'),
        ('ternary', 'ternary'),
        ('quantized_bits(4, 0, alpha=1)', 'quantized_relu(2, 0)'),
        ('quantized_bits(4, 0, alpha=1)', 'quantized_relu(4, 0)'),
        ('quantized_bits(4, 0, alpha=1)', 'quantized_relu(8, 0)')
    ]
)
def test_qactivation_kwarg(randX_100_10,
                           activation_quantizer,
                           weight_quantizer):
    """Check a QDense layer whose activation is passed via the ``activation``
    keyword (instead of a separate QActivation layer) survives conversion.

    Builds a one-layer functional Keras model, converts it with hls4ml,
    verifies the activation layer appears in the converted model, and
    compares QKeras vs. hls4ml predictions on random input.

    Parameters
    ----------
    randX_100_10 : fixture
        Random (100, 10) input array fixture defined elsewhere in this file.
    activation_quantizer, weight_quantizer : str
        QKeras quantizer expressions (see the parametrize list above).
    """
    # Evaluate the quantizer expression exactly once; the original code
    # re-ran eval() in up to three places. eval() is acceptable here because
    # the strings come only from the parametrize list above, never from
    # external input.
    quantizer_obj = eval(activation_quantizer)

    if activation_quantizer in ['binary', 'ternary']:
        name = 'bnbt_qdense_alpha'
    else:
        name = 'qdense_{}'.format(quantizer_obj.__class__.__name__)

    inputs = Input(shape=(10,))
    outputs = QDense(
        10,
        activation=activation_quantizer,
        name='qdense',
        kernel_quantizer=weight_quantizer,
        bias_quantizer=weight_quantizer,
        kernel_initializer='lecun_uniform'
    )(inputs)
    model = Model(inputs, outputs)

    # Round/saturate the output of the activation layer so hls4ml matches
    # QKeras' quantization behavior for this layer only.
    hls4ml.model.optimizer.get_optimizer(
        'output_rounding_saturation_mode'
    ).configure(
        layers=[name],
        rounding_mode='AP_RND_CONV',
        saturation_mode='AP_SAT'
    )
    config = hls4ml.utils.config_from_keras_model(
        model,
        granularity='name'
    )

    out_dir = str(
        test_root_path / 'hls4mlprj_qactivation_kwarg_{}'.format(
            activation_quantizer
        )
    )

    hls_model = hls4ml.converters.convert_from_keras_model(
        model,
        hls_config=config,
        output_dir=out_dir
    )
    # Reset the optimizer's layer list so this global setting does not leak
    # into subsequently-run tests.
    hls4ml.model.optimizer.get_optimizer(
        'output_rounding_saturation_mode'
    ).configure(layers=[])
    hls_model.compile()

    # The activation must show up as its own layer in the converted model.
    assert name in [layer.name for layer in hls_model.get_layers()]

    # Quantize the input to 10 fractional bits so both backends see
    # exactly-representable values, then compare outputs.
    X = randX_100_10
    X = np.round(X * 2**10) * 2**-10
    y_qkeras = model.predict(X)
    y_hls4ml = hls_model.predict(X)
    if hasattr(quantizer_obj, 'bits'):
        # quantized_relu / quantized_bits: allow one LSB of difference.
        np.testing.assert_allclose(
            y_qkeras.ravel(),
            y_hls4ml.ravel(),
            atol=2**-quantizer_obj.bits,
            rtol=1.0
        )
    else:
        # binary/ternary: hls4ml encodes binary as {0, 1}; map back to
        # {-1, 1} before comparing, and tolerate up to 0.5% mismatches.
        if activation_quantizer == 'binary':
            y_hls4ml = np.where(y_hls4ml == 0, -1, 1)
        wrong = (y_hls4ml != y_qkeras).ravel()
        assert sum(wrong) / len(wrong) <= 0.005
0 commit comments