Commit 409bc13

Parameterized qkeras tests for Quartus
1 parent: ac2450a


test/pytest/test_qkeras.py

Lines changed: 15 additions & 9 deletions
@@ -50,6 +50,7 @@ def load_jettagging_model():
     model.load_weights(example_model_path / 'keras/qkeras_3layer_weights.h5')
     return model
 
+# TODO - Paramaterize for Quartus (different strategies?)
 @pytest.fixture
 @pytest.mark.parametrize('strategy', ['latency', 'resource'])
 def convert(load_jettagging_model, strategy):
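
Note: the TODO above points at the `convert` fixture, which this commit leaves Vivado-only. pytest ignores `@pytest.mark.parametrize` when it is stacked on a `@pytest.fixture`, so a backend parameter would have to arrive through fixture params instead. A minimal sketch of one way the TODO could be resolved, assuming the existing `load_jettagging_model` fixture and an hls4ml config dict that accepts a `Strategy` key (this is not part of the commit):

    import pytest
    import hls4ml

    # Fixture params, not pytest.mark.parametrize, fan a fixture out over
    # several values; request.param carries the current value.
    @pytest.fixture(params=['Vivado', 'Quartus'])
    def backend(request):
        return request.param

    @pytest.fixture(params=['latency', 'resource'])
    def strategy(request):
        return request.param

    @pytest.fixture
    def convert(load_jettagging_model, strategy, backend):
        model = load_jettagging_model
        config = hls4ml.utils.config_from_keras_model(model, granularity='name')
        config['Model']['Strategy'] = strategy  # assumed config key
        return hls4ml.converters.convert_from_keras_model(
            model, hls_config=config, backend=backend)
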
@@ -111,7 +112,8 @@ def randX_100_16():
 # https://github.com/fastmachinelearning/hls4ml/issues/381
 #@pytest.mark.parametrize('bits', [4, 6, 8])
 @pytest.mark.parametrize('bits,alpha', [(4, 1), (4, 'auto_po2')])
-def test_single_dense_activation_exact(randX_100_16, bits, alpha):
+@pytest.mark.parametrize('backend', ['Vivado', 'Quartus'])
+def test_single_dense_activation_exact(randX_100_16, bits, alpha, backend):
     '''
     Test a single Dense -> Activation layer topology for
     bit exactness with number of bits parameter
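
Stacking a second `@pytest.mark.parametrize`, as done here and in the tests below, makes pytest take the Cartesian product of the parameter sets: 2 `(bits, alpha)` pairs times 2 backends yields 4 collected cases. A self-contained illustration of that expansion (the test name is a placeholder):

    import pytest

    @pytest.mark.parametrize('bits,alpha', [(4, 1), (4, 'auto_po2')])
    @pytest.mark.parametrize('backend', ['Vivado', 'Quartus'])
    def test_cross_product(bits, alpha, backend):
        # pytest collects 4 items, one per (bits, alpha, backend) combination
        assert backend in ('Vivado', 'Quartus')
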
@@ -126,10 +128,11 @@ def test_single_dense_activation_exact(randX_100_16, bits, alpha):
 
     hls4ml.model.optimizer.get_optimizer('output_rounding_saturation_mode').configure(layers=['relu1'], rounding_mode='AP_RND_CONV', saturation_mode='AP_SAT')
     config = hls4ml.utils.config_from_keras_model(model, granularity='name')
+    output_dir = str(test_root_path / 'hls4mlprj_qkeras_single_dense_activation_exact_{}_{}_{}'.format(bits, alpha, backend))
     hls_model = hls4ml.converters.convert_from_keras_model(model,
                                                            hls_config=config,
-                                                           output_dir=str(test_root_path / 'hls4mlprj_qkeras_single_dense_activation_exact_{}_{}'.format(bits, alpha)),
-                                                           part='xcu250-figd2104-2L-e')
+                                                           output_dir=output_dir,
+                                                           backend=backend)
     hls4ml.model.optimizer.get_optimizer('output_rounding_saturation_mode').configure(layers=[])
     hls_model.compile()
 
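Spelled out for a single generated case, the converted test body now does roughly the following. The QKeras model construction lives elsewhere in the test file, so the two-layer model below is a stand-in assumption for the bits=4, alpha=1, backend='Quartus' case:

    import numpy as np
    from tensorflow.keras.models import Sequential
    from qkeras import QDense, QActivation, quantized_bits, quantized_relu
    import hls4ml

    # Stand-in model: one quantized Dense followed by a quantized ReLU,
    # mirroring the Dense -> Activation topology the test targets.
    model = Sequential([
        QDense(16, input_shape=(16,), name='fc1',
               kernel_quantizer=quantized_bits(4, 0, alpha=1),
               bias_quantizer=quantized_bits(4, 0, alpha=1)),
        QActivation(activation=quantized_relu(4, 0), name='relu1'),
    ])

    config = hls4ml.utils.config_from_keras_model(model, granularity='name')
    hls_model = hls4ml.converters.convert_from_keras_model(
        model,
        hls_config=config,
        output_dir='hls4mlprj_qkeras_single_dense_activation_exact_4_1_Quartus',
        backend='Quartus')  # backend selection replaces the fixed Vivado part
    hls_model.compile()

    y_hls = hls_model.predict(np.random.rand(100, 16).astype(np.float32))

Folding `backend` into `output_dir` is not cosmetic: every parametrized case runs from the same test function, and without it the Vivado and Quartus projects would write into the same directory.
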
@@ -164,11 +167,13 @@ def randX_100_10():
                          (5, 10, ternary(alpha='auto'), quantized_bits(5,2), ternary(threshold=0.2), True, False),
                          (6, 10, ternary(alpha='auto'), quantized_bits(5,2), ternary(threshold=0.8), True, False),
                          (7, 10, binary(), quantized_bits(5,2), binary(), False, True)])
-def test_btnn(make_btnn, randX_100_10):
+@pytest.mark.parametrize('backend', ['Vivado', 'Quartus'])
+def test_btnn(make_btnn, randX_100_10, backend):
     model, is_xnor, test_no = make_btnn
     X = randX_100_10
     cfg = hls4ml.utils.config_from_keras_model(model, granularity='name')
-    hls_model = hls4ml.converters.convert_from_keras_model(model, output_dir=str(test_root_path / 'hls4mlprj_btnn_{}'.format(test_no)), hls_config=cfg)
+    output_dir = str(test_root_path / 'hls4mlprj_btnn_{}_{}'.format(test_no, backend))
+    hls_model = hls4ml.converters.convert_from_keras_model(model, output_dir=output_dir, hls_config=cfg, backend=backend)
     hls_model.compile()
     y_hls = hls_model.predict(X)
     # hls4ml may return XNOR binary
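
The `is_xnor` flag unpacked from `make_btnn` drives the comparison that follows this hunk: per the trailing comment, the backend may encode binary outputs in XNOR form, with 0 standing in for -1. A sketch of the remapping such a comparison needs, assuming that {0, 1} encoding:

    import numpy as np

    def unmap_xnor(y_hls, is_xnor):
        # hls4ml may return XNOR binary: map {0, 1} back to {-1, +1}
        # before comparing against the QKeras reference output.
        return np.where(y_hls == 0, -1, 1) if is_xnor else y_hls
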
@@ -195,7 +200,8 @@ def randX_1000_1():
                          (quantized_relu(8,4)),
                          (quantized_relu(10)),
                          (quantized_relu(10,5))])
-def test_quantizer(randX_1000_1, quantizer):
+@pytest.mark.parametrize('backend', ['Vivado', 'Quartus'])
+def test_quantizer(randX_1000_1, quantizer, backend):
     '''
     Test a single quantizer as an Activation function.
     Checks the type inference through the conversion is correct without just
@@ -209,12 +215,12 @@ def test_quantizer(randX_1000_1, quantizer):
 
     hls4ml.model.optimizer.get_optimizer('output_rounding_saturation_mode').configure(layers=['quantizer'], rounding_mode='AP_RND_CONV', saturation_mode='AP_SAT')
     config = hls4ml.utils.config_from_keras_model(model, granularity='name')
-    output_dir = str(test_root_path / 'hls4mlprj_qkeras_quantizer_{}_{}_{}'.format(quantizer.__class__.__name__,
-                                                                                  quantizer.bits, quantizer.integer))
+    output_dir = str(test_root_path / 'hls4mlprj_qkeras_quantizer_{}_{}_{}_{}'.format(quantizer.__class__.__name__,
+                                                                                      quantizer.bits, quantizer.integer, backend))
     hls_model = hls4ml.converters.convert_from_keras_model(model,
                                                            hls_config=config,
                                                            output_dir=output_dir,
-                                                           part='xcu250-figd2104-2L-e')
+                                                           backend=backend)
     hls4ml.model.optimizer.get_optimizer('output_rounding_saturation_mode').configure(layers=[])
     hls_model.compile()
 
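Both converted tests bracket the conversion with the same pair of optimizer calls: `output_rounding_saturation_mode` is configured for the named layers before `convert_from_keras_model` and reset with `configure(layers=[])` afterwards, since the optimizer holds global state that would otherwise leak into later parametrized cases. A sketch of that pattern with an explicit try/finally guard (the guard is a suggestion, not what the test does):

    import hls4ml

    opt = hls4ml.model.optimizer.get_optimizer('output_rounding_saturation_mode')
    opt.configure(layers=['quantizer'],
                  rounding_mode='AP_RND_CONV',
                  saturation_mode='AP_SAT')
    try:
        hls_model = hls4ml.converters.convert_from_keras_model(
            model, hls_config=config, output_dir=output_dir, backend=backend)
    finally:
        opt.configure(layers=[])  # reset so later tests start clean
    hls_model.compile()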