@@ -50,6 +50,7 @@ def load_jettagging_model():
     model.load_weights(example_model_path / 'keras/qkeras_3layer_weights.h5')
     return model

+# TODO - Parameterize for Quartus (different strategies?)
 @pytest.fixture
 @pytest.mark.parametrize('strategy', ['latency', 'resource'])
 def convert(load_jettagging_model, strategy):
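The TODO above flags that the `convert` fixture still targets only the default backend. A hypothetical sketch of what that parametrization could look like, using pytest's `params=` mechanism for fixtures (the fixture body, the placeholder model, and the assumption that Quartus accepts the same strategy names are mine, not part of this commit):

import pytest
import hls4ml
from pathlib import Path
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense

test_root_path = Path(__file__).parent  # mirrors the test module's convention

@pytest.fixture(params=['Vivado', 'Quartus'])
def backend(request):
    # Every test depending on this fixture runs once per HLS backend.
    return request.param

@pytest.fixture
def convert(backend):
    # Placeholder model; the real fixture uses load_jettagging_model.
    model = Sequential([Dense(5, input_shape=(16,), activation='relu')])
    config = hls4ml.utils.config_from_keras_model(model, granularity='name')
    hls_model = hls4ml.converters.convert_from_keras_model(
        model,
        hls_config=config,
        # Fold the backend into the project path to avoid collisions.
        output_dir=str(test_root_path / 'hls4mlprj_convert_{}'.format(backend)),
        backend=backend)
    hls_model.compile()
    return hls_model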
@@ -111,7 +112,8 @@ def randX_100_16():
 # https://github.com/fastmachinelearning/hls4ml/issues/381
 #@pytest.mark.parametrize('bits', [4, 6, 8])
 @pytest.mark.parametrize('bits,alpha', [(4, 1), (4, 'auto_po2')])
-def test_single_dense_activation_exact(randX_100_16, bits, alpha):
+@pytest.mark.parametrize('backend', ['Vivado', 'Quartus'])
+def test_single_dense_activation_exact(randX_100_16, bits, alpha, backend):
     '''
     Test a single Dense -> Activation layer topology for
     bit exactness with number of bits parameter
@@ -126,10 +128,11 @@ def test_single_dense_activation_exact(randX_100_16, bits, alpha):

     hls4ml.model.optimizer.get_optimizer('output_rounding_saturation_mode').configure(layers=['relu1'], rounding_mode='AP_RND_CONV', saturation_mode='AP_SAT')
     config = hls4ml.utils.config_from_keras_model(model, granularity='name')
+    output_dir = str(test_root_path / 'hls4mlprj_qkeras_single_dense_activation_exact_{}_{}_{}'.format(bits, alpha, backend))
     hls_model = hls4ml.converters.convert_from_keras_model(model,
                                                            hls_config=config,
-                                                           output_dir=str(test_root_path / 'hls4mlprj_qkeras_single_dense_activation_exact_{}_{}'.format(bits, alpha)),
-                                                           part='xcu250-figd2104-2L-e')
+                                                           output_dir=output_dir,
+                                                           backend=backend)
     hls4ml.model.optimizer.get_optimizer('output_rounding_saturation_mode').configure(layers=[])
     hls_model.compile()
@@ -164,11 +167,13 @@ def randX_100_10():
                          (5, 10, ternary(alpha='auto'), quantized_bits(5,2), ternary(threshold=0.2), True, False),
                          (6, 10, ternary(alpha='auto'), quantized_bits(5,2), ternary(threshold=0.8), True, False),
                          (7, 10, binary(), quantized_bits(5,2), binary(), False, True)])
-def test_btnn(make_btnn, randX_100_10):
+@pytest.mark.parametrize('backend', ['Vivado', 'Quartus'])
+def test_btnn(make_btnn, randX_100_10, backend):
     model, is_xnor, test_no = make_btnn
     X = randX_100_10
     cfg = hls4ml.utils.config_from_keras_model(model, granularity='name')
-    hls_model = hls4ml.converters.convert_from_keras_model(model, output_dir=str(test_root_path / 'hls4mlprj_btnn_{}'.format(test_no)), hls_config=cfg)
+    output_dir = str(test_root_path / 'hls4mlprj_btnn_{}_{}'.format(test_no, backend))
+    hls_model = hls4ml.converters.convert_from_keras_model(model, output_dir=output_dir, hls_config=cfg, backend=backend)
     hls_model.compile()
     y_hls = hls_model.predict(X)
     # hls4ml may return XNOR binary
@@ -195,7 +200,8 @@ def randX_1000_1():
                          (quantized_relu(8,4)),
                          (quantized_relu(10)),
                          (quantized_relu(10,5))])
-def test_quantizer(randX_1000_1, quantizer):
+@pytest.mark.parametrize('backend', ['Vivado', 'Quartus'])
+def test_quantizer(randX_1000_1, quantizer, backend):
     '''
     Test a single quantizer as an Activation function.
     Checks the type inference through the conversion is correct without just
@@ -209,12 +215,12 @@ def test_quantizer(randX_1000_1, quantizer):

     hls4ml.model.optimizer.get_optimizer('output_rounding_saturation_mode').configure(layers=['quantizer'], rounding_mode='AP_RND_CONV', saturation_mode='AP_SAT')
     config = hls4ml.utils.config_from_keras_model(model, granularity='name')
-    output_dir = str(test_root_path / 'hls4mlprj_qkeras_quantizer_{}_{}_{}'.format(quantizer.__class__.__name__,
-                                                                                   quantizer.bits, quantizer.integer))
+    output_dir = str(test_root_path / 'hls4mlprj_qkeras_quantizer_{}_{}_{}_{}'.format(quantizer.__class__.__name__,
+                                                                                      quantizer.bits, quantizer.integer, backend))
     hls_model = hls4ml.converters.convert_from_keras_model(model,
                                                            hls_config=config,
                                                            output_dir=output_dir,
-                                                           part='xcu250-figd2104-2L-e')
+                                                           backend=backend)
     hls4ml.model.optimizer.get_optimizer('output_rounding_saturation_mode').configure(layers=[])
     hls_model.compile()
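Taken together, the hunks apply one pattern: replace the hard-coded Xilinx part string `'xcu250-figd2104-2L-e'` with a parametrized `backend`, and fold the backend name into `output_dir` so the Vivado and Quartus runs of the same test don't overwrite each other's project directories. A condensed, hedged illustration of that pattern as a standalone test (the model, layer sizes, and test name here are placeholders of mine, not from the commit):

import pytest
import hls4ml
import numpy as np
from pathlib import Path
from qkeras import QDense, quantized_bits
from tensorflow.keras.models import Sequential

test_root_path = Path(__file__).parent

@pytest.mark.parametrize('backend', ['Vivado', 'Quartus'])
def test_backend_parity(backend):
    # Placeholder QKeras model; the real tests build Dense/BNN/quantizer models.
    model = Sequential([QDense(4, input_shape=(16,),
                               kernel_quantizer=quantized_bits(8, 0),
                               bias_quantizer=quantized_bits(8, 0))])
    config = hls4ml.utils.config_from_keras_model(model, granularity='name')
    # Backend goes into the project path so the two parametrizations don't collide.
    output_dir = str(test_root_path / 'hls4mlprj_parity_{}'.format(backend))
    hls_model = hls4ml.converters.convert_from_keras_model(
        model, hls_config=config, output_dir=output_dir, backend=backend)
    hls_model.compile()
    y_hls = hls_model.predict(np.random.rand(100, 16).astype(np.float32))
    assert y_hls.shape == (100, 4)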