
Commit 35fe572

Authored by jmduarte, jmitrevs, and vloncar
Merge with main (#52)
* Add quantized sigmoid, fix quantized tanh for QKeras (#569)
* snapshot of beginnings
* make version that works for Vivado, error for Quartus
* Change order of precision from quantizer
* add hard sigmoid and tanh
* fix setting of slope and shift type
* revert config parsing -- seems a little strange but works
* fix hard_sigmoid and hard_tanh for streaming
* update pytest for quantized tanh and sigmoid
* remove inadvertently included matplotlib
* add special case when W == min_width
* fix merge of main
* Go back to having AP_TRN and AP_WRP as defaults
* handle case when use_real_tanh is not defined
* make the activations use AP_RND_CONV (and AP_SAT) by default
* remove use of use_real_tanh in test since not always supported
* fix incorrect default types for Keras (not QKeras) hard_sigmoid
* Mostly fix up things for Quartus
* get rid of intermediate cast
* fix an i++ compilation issue
* Quartus seems to not like ac_fixed<1,0,false>, so make it 2 bits
* fix activation quantizer
* make sat, round the default activation parameters, don't need to set again
* Make the slope and shift not be configurable for HardActivation
* some pre-commit fixes
* pre-commit "//hls" to "// hls" fixes
* update CI version
* fixes for parsing errors from pre-commits
* remove qactivation from list of activation_layers
* print_vivado_report function for nicer reports (#730)
* print_vivado_report function for fancier reports
* Fancy reports (#51)
* fix uram divide by 0
* add test
* fix parsing of vsynth in 2020.1; add test
* Update test_report.py
* exclude pregenerated reports

---------

Co-authored-by: Javier Duarte <[email protected]>

---------

Co-authored-by: Jovan Mitrevski <[email protected]>
Co-authored-by: Vladimir <[email protected]>
1 parent: bfffab8 · commit: 35fe572

39 files changed: +3443, -1438 lines

.pre-commit-config.yaml

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-exclude: ^hls4ml\/templates\/(vivado|quartus)\/(ap_types|ac_types)\/
+exclude: (^hls4ml\/templates\/(vivado|quartus)\/(ap_types|ac_types)\/|^test/pytest/test_report/)
 
 repos:
   - repo: https://github.com/psf/black

hls4ml/backends/quartus/passes/core_templates.py

Lines changed: 46 additions & 12 deletions
@@ -1,7 +1,6 @@
-
 from hls4ml.backends.backend import get_backend
-from hls4ml.model.layers import Activation, BatchNormalization, Dense, PReLU, ParametrizedActivation, Softmax
-from hls4ml.backends.template import LayerConfigTemplate, FunctionCallTemplate
+from hls4ml.backends.template import FunctionCallTemplate, LayerConfigTemplate
+from hls4ml.model.layers import Activation, BatchNormalization, Dense, HardActivation, ParametrizedActivation, PReLU, Softmax
 
 # Dense templates
 
@@ -38,24 +37,28 @@
 
 dense_include_list = ['nnet_utils/nnet_dense.h', 'nnet_utils/nnet_dense_compressed.h', 'nnet_utils/nnet_dense_stream.h']
 
+
 class DenseConfigTemplate(LayerConfigTemplate):
     def __init__(self):
         super().__init__(Dense)
         self.template = dense_config_template
-
+
     def format(self, node):
         params = self._default_config_params(node)
         params['nzeros'] = node.get_weights('weight').nzeros
         params['nonzeros'] = node.get_weights('weight').nonzeros
-        params['product_type'] = get_backend('quartus').product_type(node.get_input_variable().type.precision, node.get_weights('weight').type.precision)
+        params['product_type'] = get_backend('quartus').product_type(
+            node.get_input_variable().type.precision, node.get_weights('weight').type.precision
+        )
 
         return self.template.format(**params)
 
+
 class DenseFunctionTemplate(FunctionCallTemplate):
     def __init__(self):
         super().__init__(Dense, include_header=dense_include_list)
         self.template = dense_function_template
-
+
     def format(self, node):
         params = self._default_function_params(node)
         params['w'] = node.get_weights('weight').name
@@ -82,23 +85,27 @@ def format(self, node):
 
 batchnorm_include_list = ['nnet_utils/nnet_batchnorm.h', 'nnet_utils/nnet_batchnorm_stream.h']
 
+
 class BatchNormalizationConfigTemplate(LayerConfigTemplate):
     def __init__(self):
         super().__init__(BatchNormalization)
         self.template = batchnorm_config_template
-
+
     def format(self, node):
         params = self._default_config_params(node)
        params['n_in'] = node.get_input_variable().size_cpp()
-        params['product_type'] = get_backend('quartus').product_type(node.get_input_variable().type.precision, node.get_weights('scale').type.precision)
+        params['product_type'] = get_backend('quartus').product_type(
+            node.get_input_variable().type.precision, node.get_weights('scale').type.precision
+        )
 
         return self.template.format(**params)
 
+
 class BatchNormalizationFunctionTemplate(FunctionCallTemplate):
     def __init__(self):
         super().__init__(BatchNormalization, include_header=batchnorm_include_list)
         self.template = batchnorm_function_template
-
+
     def format(self, node):
         params = self._default_function_params(node)
         params['scale'] = node.get_weights('scale').name
@@ -117,6 +124,16 @@ def format(self, node):
     typedef {table_t.name} table_t;
 }};\n"""
 
+hard_activ_config_template = """struct {type}_config{index} {{
+    static const unsigned n_in = {n_in};
+    static const {slope_t.name} slope;
+    static const {shift_t.name} shift;
+    static const unsigned io_type = nnet::{iotype};
+    static const unsigned reuse_factor = {reuse};
+}};
+const {slope_t.name} {type}_config{index}::slope = {slope};
+const {shift_t.name} {type}_config{index}::shift = {shift};\n"""
+
 softmax_config_template = """struct {type}_config{index} : nnet::activ_config {{
     static const unsigned n_in = {n_in};
     static const unsigned table_size = {table_size};
@@ -132,6 +149,7 @@ def format(self, node):
 
 activ_include_list = ['nnet_utils/nnet_activation.h', 'nnet_utils/nnet_activation_stream.h']
 
+
 class ActivationConfigTemplate(LayerConfigTemplate):
     def __init__(self):
         super().__init__((Activation, ParametrizedActivation, PReLU))
@@ -143,23 +161,38 @@ def format(self, node):
 
         return self.template.format(**params)
 
+
+class HardActivationConfigTemplate(LayerConfigTemplate):
+    def __init__(self):
+        super().__init__(HardActivation)
+        self.template = hard_activ_config_template
+
+    def format(self, node):
+        params = self._default_config_params(node)
+        params['type'] = node.get_attr('activation')
+
+        return self.template.format(**params)
+
+
 class SoftmaxConfigTemplate(ActivationConfigTemplate):
     def __init__(self):
-        super(ActivationConfigTemplate, self).__init__(Softmax) # Skip ActivationConfigTemplate's __init__
+        super(ActivationConfigTemplate, self).__init__(Softmax)  # Skip ActivationConfigTemplate's __init__
         self.template = softmax_config_template
 
+
 class ActivationFunctionTemplate(FunctionCallTemplate):
     def __init__(self):
-        super().__init__((Activation, Softmax), include_header=activ_include_list)
+        super().__init__((Activation, HardActivation, Softmax), include_header=activ_include_list)
         self.template = activ_function_template
-
+
     def format(self, node):
         params = self._default_function_params(node)
         params['activation'] = node.get_attr('activation').lower()
         params['config'] = '{}_config{}'.format(node.get_attr('activation'), node.index)
 
         return self.template.format(**params)
 
+
 class ParametrizedActivationFunctionTemplate(FunctionCallTemplate):
     def __init__(self):
         super().__init__(ParametrizedActivation, include_header=activ_include_list)
@@ -173,6 +206,7 @@ def format(self, node):
 
         return self.template.format(**params)
 
+
 class PReLUFunctionTemplate(FunctionCallTemplate):
     def __init__(self):
         super().__init__(PReLU, include_header=activ_include_list)
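
To make the new config template concrete, here is a small standalone sketch (not part of the commit) that fills hard_activ_config_template by hand, approximating what HardActivationConfigTemplate.format() would emit for a hard_sigmoid layer. Every parameter value below, including the ac_fixed type name, is an illustrative assumption:

# Illustrative rendering of hard_activ_config_template; all values are made up.
from types import SimpleNamespace

hard_activ_config_template = """struct {type}_config{index} {{
    static const unsigned n_in = {n_in};
    static const {slope_t.name} slope;
    static const {shift_t.name} shift;
    static const unsigned io_type = nnet::{iotype};
    static const unsigned reuse_factor = {reuse};
}};
const {slope_t.name} {type}_config{index}::slope = {slope};
const {shift_t.name} {type}_config{index}::shift = {shift};\n"""

params = {
    'type': 'hard_sigmoid',                                  # node.get_attr('activation')
    'index': 4,                                              # hypothetical layer index
    'n_in': 16,                                              # hypothetical layer size
    'slope_t': SimpleNamespace(name='ac_fixed<2,0,false>'),  # stands in for the slope_prec type
    'shift_t': SimpleNamespace(name='ac_fixed<2,0,false>'),  # stands in for the shift_prec type
    'iotype': 'io_parallel',
    'reuse': 1,
    'slope': 0.5,
    'shift': 0.5,
}
print(hard_activ_config_template.format(**params))

In hls4ml itself these values come from _default_config_params(node), plus the params['type'] override added in HardActivationConfigTemplate above.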

hls4ml/backends/vivado/passes/core_templates.py

Lines changed: 46 additions & 12 deletions
@@ -1,7 +1,6 @@
-
 from hls4ml.backends.backend import get_backend
-from hls4ml.model.layers import Activation, BatchNormalization, Dense, Embedding, PReLU, ParametrizedActivation, Softmax
-from hls4ml.backends.template import LayerConfigTemplate, FunctionCallTemplate
+from hls4ml.backends.template import FunctionCallTemplate, LayerConfigTemplate
+from hls4ml.model.layers import Activation, BatchNormalization, Dense, HardActivation, ParametrizedActivation, PReLU, Softmax
 
 # Dense templates
 
@@ -27,24 +26,28 @@
 
 dense_include_list = ['nnet_utils/nnet_dense.h', 'nnet_utils/nnet_dense_compressed.h', 'nnet_utils/nnet_dense_stream.h']
 
+
 class DenseConfigTemplate(LayerConfigTemplate):
     def __init__(self):
         super().__init__(Dense)
         self.template = dense_config_template
-
+
     def format(self, node):
         params = self._default_config_params(node)
         params['nzeros'] = node.get_weights('weight').nzeros
         params['nonzeros'] = node.get_weights('weight').nonzeros
-        params['product_type'] = get_backend('vivado').product_type(node.get_input_variable().type.precision, node.get_weights('weight').type.precision)
+        params['product_type'] = get_backend('vivado').product_type(
+            node.get_input_variable().type.precision, node.get_weights('weight').type.precision
+        )
 
         return self.template.format(**params)
 
+
 class DenseFunctionTemplate(FunctionCallTemplate):
     def __init__(self):
         super().__init__(Dense, include_header=dense_include_list)
         self.template = dense_function_template
-
+
     def format(self, node):
         params = self._default_function_params(node)
         params['w'] = node.get_weights('weight').name
@@ -73,23 +76,27 @@ def format(self, node):
 
 batchnorm_include_list = ['nnet_utils/nnet_batchnorm.h', 'nnet_utils/nnet_batchnorm_stream.h']
 
+
 class BatchNormalizationConfigTemplate(LayerConfigTemplate):
     def __init__(self):
         super().__init__(BatchNormalization)
         self.template = batchnorm_config_template
-
+
     def format(self, node):
         params = self._default_config_params(node)
         params['n_in'] = node.get_input_variable().size_cpp()
-        params['product_type'] = get_backend('vivado').product_type(node.get_input_variable().type.precision, node.get_weights('scale').type.precision)
+        params['product_type'] = get_backend('vivado').product_type(
+            node.get_input_variable().type.precision, node.get_weights('scale').type.precision
+        )
 
         return self.template.format(**params)
 
+
 class BatchNormalizationFunctionTemplate(FunctionCallTemplate):
     def __init__(self):
         super().__init__(BatchNormalization, include_header=batchnorm_include_list)
         self.template = batchnorm_function_template
-
+
     def format(self, node):
         params = self._default_function_params(node)
         params['scale'] = node.get_weights('scale').name
@@ -108,6 +115,16 @@ def format(self, node):
     typedef {table_t.name} table_t;
 }};\n"""
 
+hard_activ_config_template = """struct {type}_config{index} {{
+    static const unsigned n_in = {n_in};
+    static const {slope_t.name} slope;
+    static const {shift_t.name} shift;
+    static const unsigned io_type = nnet::{iotype};
+    static const unsigned reuse_factor = {reuse};
+}};
+const {slope_t.name} {type}_config{index}::slope = {slope};
+const {shift_t.name} {type}_config{index}::shift = {shift};\n"""
+
 softmax_config_template = """struct {type}_config{index} : nnet::activ_config {{
     static const unsigned n_in = {n_in};
     static const unsigned table_size = {table_size};
@@ -124,6 +141,7 @@ def format(self, node):
 
 activ_include_list = ['nnet_utils/nnet_activation.h', 'nnet_utils/nnet_activation_stream.h']
 
+
 class ActivationConfigTemplate(LayerConfigTemplate):
     def __init__(self):
         super().__init__((Activation, ParametrizedActivation, PReLU))
@@ -135,23 +153,38 @@ def format(self, node):
 
         return self.template.format(**params)
 
+
+class HardActivationConfigTemplate(LayerConfigTemplate):
+    def __init__(self):
+        super().__init__(HardActivation)
+        self.template = hard_activ_config_template
+
+    def format(self, node):
+        params = self._default_config_params(node)
+        params['type'] = node.get_attr('activation')
+
+        return self.template.format(**params)
+
+
 class SoftmaxConfigTemplate(ActivationConfigTemplate):
     def __init__(self):
-        super(ActivationConfigTemplate, self).__init__(Softmax) # Skip ActivationConfigTemplate's __init__
+        super(ActivationConfigTemplate, self).__init__(Softmax)  # Skip ActivationConfigTemplate's __init__
         self.template = softmax_config_template
 
+
 class ActivationFunctionTemplate(FunctionCallTemplate):
     def __init__(self):
-        super().__init__((Activation, Softmax), include_header=activ_include_list)
+        super().__init__((Activation, HardActivation, Softmax), include_header=activ_include_list)
         self.template = activ_function_template
-
+
     def format(self, node):
         params = self._default_function_params(node)
         params['activation'] = node.get_attr('activation').lower()
         params['config'] = '{}_config{}'.format(node.get_attr('activation'), node.index)
 
         return self.template.format(**params)
 
+
 class ParametrizedActivationFunctionTemplate(FunctionCallTemplate):
     def __init__(self):
         super().__init__(ParametrizedActivation, include_header=activ_include_list)
@@ -165,6 +198,7 @@ def format(self, node):
 
         return self.template.format(**params)
 
+
 class PReLUFunctionTemplate(FunctionCallTemplate):
     def __init__(self):
         super().__init__(PReLU, include_header=activ_include_list)

hls4ml/converters/keras/core.py

Lines changed: 2 additions & 0 deletions
@@ -105,6 +105,8 @@ def parse_activation_layer(keras_layer, input_names, input_shapes, data_reader):
 
     if layer['class_name'] == 'Activation' and layer['activation'] == 'softmax':
         layer['class_name'] = 'Softmax'
+    if layer['class_name'] == 'Activation' and layer['activation'] == 'hard_sigmoid':
+        layer['class_name'] = 'HardActivation'
     if layer['class_name'] == 'Softmax':
         layer['axis'] = keras_layer['config'].get('axis', -1)
 

hls4ml/converters/keras/qkeras_layers.py

Lines changed: 21 additions & 3 deletions
@@ -4,6 +4,7 @@
 from hls4ml.converters.keras.core import parse_batchnorm_layer, parse_dense_layer
 from hls4ml.converters.keras.qkeras import get_quantizer_from_config
 from hls4ml.converters.keras_to_hls import keras_handler, parse_default_keras_layer
+from hls4ml.model.types import FixedPrecisionType
 
 
 @keras_handler('QDense')
@@ -46,6 +47,7 @@ def parse_qactivation_layer(keras_layer, input_names, input_shapes, data_reader)
         'quantized_tanh',
         'binary_tanh',
         'ternary_tanh',
+        'quantized_sigmoid',
         'quantized_bits',
         'binary',
         'ternary',
@@ -79,16 +81,32 @@ def parse_qactivation_layer(keras_layer, input_names, input_shapes, data_reader)
     if activation_config['class_name'] not in supported_activations:
         raise Exception('Unsupported QKeras activation: {}'.format(activation_config['class_name']))
 
+    if activation_config['class_name'] == 'quantized_bits':
+        activation_config['class_name'] = 'linear'
+
     if activation_config['class_name'] == 'ternary_tanh':
         layer['class_name'] = 'TernaryTanh'
         layer['threshold'] = activation_config.get('config', {}).get('threshold', 0.33)
         if layer['threshold'] is None:
             layer['threshold'] = 0.33  # the default ternary tanh threshold for QKeras
+        layer['activation'] = 'ternary_tanh'
+    elif (
+        activation_config['class_name'] == 'quantized_sigmoid'
+        and not activation_config['config'].get('use_real_sigmoid', False)
+    ) or (
+        activation_config['class_name'] == 'quantized_tanh' and not activation_config['config'].get('use_real_tanh', False)
+    ):
+        layer['class_name'] = 'HardActivation'
+        layer['slope'] = 0.5  # the default values in QKeras
+        layer['shift'] = 0.5
+        # Quartus seems to have trouble if the width is 1.
+        layer['slope_prec'] = FixedPrecisionType(width=2, integer=0, signed=False)
+        layer['shift_prec'] = FixedPrecisionType(width=2, integer=0, signed=False)
+        layer['activation'] = activation_config['class_name'].replace('quantized_', 'hard_')
     else:
         layer['class_name'] = 'Activation'
-        if activation_config['class_name'] == 'quantized_bits':
-            activation_config['class_name'] = 'linear'
-        layer['activation'] = activation_config['class_name'].replace('quantized_', '')
+        layer['activation'] = activation_config['class_name'].replace('quantized_', '')
+
     layer['activation_quantizer'] = activation_config
     return layer, [shape for shape in input_shapes[0]]
 
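
For reference, the HardActivation layers produced here correspond to the piecewise-linear surrogates QKeras uses when use_real_sigmoid / use_real_tanh are false. A minimal NumPy sketch of that arithmetic, assuming the slope = 0.5 and shift = 0.5 defaults set above (the function names are illustrative, not hls4ml or QKeras API):

import numpy as np


def hard_sigmoid(x, slope=0.5, shift=0.5):
    # Piecewise-linear sigmoid surrogate: clip(slope * x + shift, 0, 1).
    # Note: plain Keras hard_sigmoid uses slope 0.2; the QKeras default is 0.5.
    return np.clip(slope * x + shift, 0.0, 1.0)


def hard_tanh(x, slope=0.5, shift=0.5):
    # QKeras builds its hard tanh from the hard sigmoid: 2 * hard_sigmoid(x) - 1.
    return 2.0 * hard_sigmoid(x, slope, shift) - 1.0


print(hard_sigmoid(np.array([-3.0, 0.0, 3.0])))  # -> [0.  0.5 1. ]
print(hard_tanh(np.array([-3.0, 0.0, 3.0])))     # -> [-1.  0.  1.]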

hls4ml/converters/keras_to_hls.py

Lines changed: 10 additions & 1 deletion
@@ -257,7 +257,16 @@ def parse_keras_model(model_arch, reader):
     # Define layers to skip for conversion to HLS
     skip_layers = ['Dropout']
     # Activation layers
-    activation_layers = ['Activation', 'LeakyReLU', 'ThresholdedReLU', 'ELU', 'PReLU', 'Softmax', 'TernaryTanh']
+    activation_layers = [
+        'Activation',
+        'LeakyReLU',
+        'ThresholdedReLU',
+        'ELU',
+        'PReLU',
+        'Softmax',
+        'TernaryTanh',
+        'HardActivation',
+    ]
     # Recurrent layers
     recurrent_layers = ['SimpleRNN', 'LSTM', 'GRU']
     # All supported layers
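
As a usage illustration (not part of this commit), a QKeras model using quantized_sigmoid and quantized_tanh should now parse into HardActivation layers and convert end to end. A minimal sketch, assuming a working QKeras and hls4ml installation; the layer sizes, quantizer settings, backend, and output directory are arbitrary:

import hls4ml
from qkeras import QActivation, QDense
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model

# Toy model: quantized_tanh / quantized_sigmoid without use_real_* map to HardActivation.
inputs = Input(shape=(16,))
x = QDense(8, kernel_quantizer='quantized_bits(8,0)', bias_quantizer='quantized_bits(8,0)')(inputs)
x = QActivation('quantized_tanh(8)')(x)
x = QDense(1, kernel_quantizer='quantized_bits(8,0)', bias_quantizer='quantized_bits(8,0)')(x)
outputs = QActivation('quantized_sigmoid(8)')(x)
model = Model(inputs, outputs)

config = hls4ml.utils.config_from_keras_model(model, granularity='name')
hls_model = hls4ml.converters.convert_from_keras_model(
    model, hls_config=config, backend='Vivado', output_dir='hls_prj'
)
hls_model.compile()  # build the C simulation library to cross-check against the Keras model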
