Commit d82423d

Merge branch 'main' into poolingFix
2 parents 6a84e0a + a031b6a commit d82423d

File tree

16 files changed: +383 −436 lines changed


contrib/garnet.py

Lines changed: 6 additions & 9 deletions
@@ -3,21 +3,18 @@
 """

 import tensorflow.keras as keras
+from qkeras import QActivation, QDense, ternary

 K = keras.backend

-try:
-    from qkeras import QActivation, QDense, ternary

-    class NamedQDense(QDense):
-        def add_weight(self, name=None, **kwargs):
-            return super().add_weight(name=f'{self.name}_{name}', **kwargs)
+class NamedQDense(QDense):
+    def add_weight(self, name=None, **kwargs):
+        return super().add_weight(name=f'{self.name}_{name}', **kwargs)

-    def ternary_1_05():
-        return ternary(alpha=1.0, threshold=0.5)

-except ImportError:
-    pass
+def ternary_1_05():
+    return ternary(alpha=1.0, threshold=0.5)


 # Hack keras Dense to propagate the layer name into saved weights
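With the try/except removed, qkeras becomes a hard dependency of contrib/garnet.py and NamedQDense and ternary_1_05 are always defined. Below is a minimal usage sketch, not part of the commit; the import path contrib.garnet, the layer size and the layer name 'gar_dense' are assumptions for illustration.

# Sketch only: exercises the two helpers touched by this diff.
# 'contrib.garnet' as an import path and the name 'gar_dense' are made up.
import tensorflow.keras as keras

from contrib.garnet import NamedQDense, ternary_1_05

inputs = keras.Input(shape=(16,))
# NamedQDense prefixes weight names with the layer name, so saved weights
# appear as e.g. 'gar_dense_kernel'; ternary_1_05() supplies ternary(alpha=1.0, threshold=0.5).
outputs = NamedQDense(8, kernel_quantizer=ternary_1_05(), name='gar_dense')(inputs)
model = keras.Model(inputs, outputs)
model.summary()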

hls4ml/backends/quartus/passes/merge_templates.py

Lines changed: 1 addition & 1 deletion
@@ -68,7 +68,7 @@ def __init__(self):
     def format(self, node):
         inp1 = node.get_input_variable(node.inputs[0])
         inp2 = node.get_input_variable(node.inputs[1])
-        params = node._default_config_params()
+        params = self._default_config_params(node)
         params['n_out'] = 1
         params['n_in'] = inp1.shape[0]
         params['product_type'] = get_backend('quartus').product_type(inp1.type.precision, inp2.type.precision)

hls4ml/backends/vivado/passes/merge_templates.py

Lines changed: 1 addition & 1 deletion
@@ -65,7 +65,7 @@ def __init__(self):
     def format(self, node):
         inp1 = node.get_input_variable(node.inputs[0])
         inp2 = node.get_input_variable(node.inputs[1])
-        params = node._default_config_params()
+        params = self._default_config_params(node)
         params['n_out'] = 1
         params['n_in'] = inp1.shape[0]
         params['product_type'] = get_backend('vivado').product_type(inp1.type.precision, inp2.type.precision)
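The Quartus and Vivado changes above are the same one-line fix: the call moves from the node to the template, which suggests the helper that assembles default config parameters lives on the template class and takes the node as an argument. A self-contained sketch of that pattern follows; FakeNode and FakeTemplate are made-up stand-ins, not the real hls4ml classes.

# Illustrative only; the real hls4ml templates and layer nodes carry much more state.
class FakeNode:
    def __init__(self, name):
        self.name = name

class FakeTemplate:
    def _default_config_params(self, node):
        # The helper lives on the template and reads what it needs from the node.
        return {'layer_name': node.name}

    def format(self, node):
        params = self._default_config_params(node)  # corrected call, as in this commit
        params['n_out'] = 1
        return params

print(FakeTemplate().format(FakeNode('concat1')))  # {'layer_name': 'concat1', 'n_out': 1}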

hls4ml/converters/__init__.py

Lines changed: 2 additions & 1 deletion
@@ -64,7 +64,8 @@
                 elif model_type == 'onnx':
                     register_onnx_layer_handler(layer, func)

-    except ImportError:
+    except ImportError as err:
+        print(f'WARNING: Failed to import handlers from {module}: {err.msg}.')
         continue

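Handler modules that fail to import are still skipped, but the failure is now printed instead of being silent. A standalone sketch of the same warn-and-continue pattern, run over a made-up module list rather than the hls4ml handler modules:

import importlib

# 'not_a_real_handler_module' is deliberately bogus to trigger the warning path.
for module in ['json', 'not_a_real_handler_module']:
    try:
        importlib.import_module(module)
    except ImportError as err:
        print(f'WARNING: Failed to import handlers from {module}: {err.msg}.')
        continue
    print(f'Registered handlers from {module}')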

hls4ml/converters/keras/core.py

Lines changed: 1 addition & 34 deletions
@@ -1,7 +1,5 @@
-import numpy as np
-
 from hls4ml.converters.keras_to_hls import keras_handler, parse_default_keras_layer
-from hls4ml.model.types import IntegerPrecisionType, Quantizer
+from hls4ml.model.types import BinaryQuantizer, IntegerPrecisionType, TernaryQuantizer


 @keras_handler('InputLayer')
@@ -25,37 +23,6 @@ def parse_input_layer(keras_layer, input_names, input_shapes, data_reader):
     return layer, output_shape


-class BinaryQuantizer(Quantizer):
-    def __init__(self, bits=2):
-        if bits == 1:
-            hls_type = IntegerPrecisionType(width=1, signed=False)
-        elif bits == 2:
-            hls_type = IntegerPrecisionType(width=2)
-        else:
-            raise Exception(f'BinaryQuantizer suppots 1 or 2 bits, but called with bits={bits}')
-        super().__init__(bits, hls_type)
-
-    def __call__(self, data):
-        zeros = np.zeros_like(data)
-        ones = np.ones_like(data)
-        quant_data = data
-        if self.bits == 1:
-            quant_data = np.where(data > 0, ones, zeros).astype('int')
-        if self.bits == 2:
-            quant_data = np.where(data > 0, ones, -ones)
-        return quant_data
-
-
-class TernaryQuantizer(Quantizer):
-    def __init__(self):
-        super().__init__(2, IntegerPrecisionType(width=2))
-
-    def __call__(self, data):
-        zeros = np.zeros_like(data)
-        ones = np.ones_like(data)
-        return np.where(data > 0.5, ones, np.where(data <= -0.5, -ones, zeros))
-
-
 dense_layers = ['Dense', 'BinaryDense', 'TernaryDense']

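BinaryQuantizer and TernaryQuantizer are no longer defined here; they are imported from hls4ml.model.types. A quick numeric sketch of their behaviour, based on the implementations removed above and assuming the relocated classes are unchanged:

import numpy as np

from hls4ml.model.types import BinaryQuantizer, TernaryQuantizer

w = np.array([-0.9, -0.3, 0.0, 0.4, 0.8])
# 2-bit binary quantization maps weights to -1 / +1 based on sign.
print(BinaryQuantizer(bits=2)(w))  # [-1. -1. -1.  1.  1.]
# Ternary quantization maps to -1 / 0 / +1 with a 0.5 threshold.
print(TernaryQuantizer()(w))       # [-1.  0.  0.  0.  1.]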

hls4ml/converters/keras/qkeras.py

Lines changed: 131 additions & 76 deletions
@@ -1,81 +1,9 @@
-import tensorflow as tf
 from qkeras.quantizers import get_quantizer

-from hls4ml.converters.keras.core import BinaryQuantizer
-from hls4ml.model.types import ExponentPrecisionType, FixedPrecisionType, IntegerPrecisionType, Quantizer, XnorPrecisionType
-
-
-class QKerasQuantizer(Quantizer):
-    def __init__(self, config):
-        self.quantizer_fn = get_quantizer(config)
-        self.alpha = config['config'].get('alpha', None)
-        if config['class_name'] == 'quantized_bits':
-            self.bits = config['config']['bits']
-            self.hls_type = get_type(config)
-        # ! includes stochastic_ternary
-        elif 'ternary' in config['class_name']:
-            self.bits = 2
-            self.hls_type = IntegerPrecisionType(width=2, signed=True)
-        # ! includes stochastic_binary
-        elif 'binary' in config['class_name']:
-            self.bits = 1
-            self.hls_type = XnorPrecisionType()
-        else:
-            print("Unsupported quantizer: " + config['class_name'])
-            self.bits = 16
-            self.hls_type = FixedPrecisionType(width=16, integer=6, signed=True)
-
-    def __call__(self, data):
-        tf_data = tf.convert_to_tensor(data)
-        return self.quantizer_fn(tf_data).numpy()
-        # return self.quantizer_fn(data)
-
-
-class QKerasBinaryQuantizer:
-    def __init__(self, config, xnor=False):
-        self.bits = 1 if xnor else 2
-        self.hls_type = XnorPrecisionType() if xnor else IntegerPrecisionType(width=2, signed=True)
-        self.alpha = config['config']['alpha']
-        # Use the QKeras quantizer to handle any stochastic / alpha stuff
-        self.quantizer_fn = get_quantizer(config)
-        # Then we use our BinaryQuantizer to convert to '0,1' format
-        self.binary_quantizer = BinaryQuantizer(1) if xnor else BinaryQuantizer(2)
-
-    def __call__(self, data):
-        x = tf.convert_to_tensor(data)
-        y = self.quantizer_fn(x).numpy()
-        return self.binary_quantizer(y)
-
-
-class QKerasPO2Quantizer:
-    def __init__(self, config):
-        self.bits = config['config']['bits']
-        self.quantizer_fn = get_quantizer(config)
-        self.hls_type = ExponentPrecisionType(width=self.bits, signed=True)
-
-    def __call__(self, data):
-        '''
-        Weights are quantized to nearest power of two
-        '''
-        x = tf.convert_to_tensor(data)
-        y = self.quantizer_fn(x)
-        if hasattr(y, 'numpy'):
-            y = y.numpy()
-        return y
-
-
-def get_type(quantizer_config):
-    width = quantizer_config['config']['bits']
-    integer = quantizer_config['config'].get('integer', 0)
-    if quantizer_config['class_name'] == 'quantized_po2':
-        return ExponentPrecisionType(width=width, signed=True)
-    if width == integer:
-        if width == 1:
-            return XnorPrecisionType()
-        else:
-            return IntegerPrecisionType(width=width, signed=True)
-    else:
-        return FixedPrecisionType(width=width, integer=integer + 1, signed=True)
+from hls4ml.converters.keras.convolution import parse_conv1d_layer, parse_conv2d_layer
+from hls4ml.converters.keras.core import parse_batchnorm_layer, parse_dense_layer
+from hls4ml.converters.keras_to_hls import keras_handler, parse_default_keras_layer
+from hls4ml.model.types import FixedPrecisionType, QKerasBinaryQuantizer, QKerasPO2Quantizer, QKerasQuantizer


 def get_quantizer_from_config(keras_layer, quantizer_var):
@@ -88,3 +16,130 @@ def get_quantizer_from_config(keras_layer, quantizer_var):
         return QKerasPO2Quantizer(quantizer_config)
     else:
         return QKerasQuantizer(quantizer_config)
+
+
+@keras_handler('QDense')
+def parse_qdense_layer(keras_layer, input_names, input_shapes, data_reader):
+
+    layer, output_shape = parse_dense_layer(keras_layer, input_names, input_shapes, data_reader)
+
+    layer['weight_quantizer'] = get_quantizer_from_config(keras_layer, 'kernel')
+    if keras_layer['config']['bias_quantizer'] is not None:
+        layer['bias_quantizer'] = get_quantizer_from_config(keras_layer, 'bias')
+    else:
+        layer['bias_quantizer'] = None
+
+    return layer, output_shape
+
+
+@keras_handler('QConv1D', 'QConv2D')
+def parse_qconv_layer(keras_layer, input_names, input_shapes, data_reader):
+    assert 'QConv' in keras_layer['class_name']
+
+    if '1D' in keras_layer['class_name']:
+        layer, output_shape = parse_conv1d_layer(keras_layer, input_names, input_shapes, data_reader)
+    elif '2D' in keras_layer['class_name']:
+        layer, output_shape = parse_conv2d_layer(keras_layer, input_names, input_shapes, data_reader)
+
+    layer['weight_quantizer'] = get_quantizer_from_config(keras_layer, 'kernel')
+    if keras_layer['config']['bias_quantizer'] is not None:
+        layer['bias_quantizer'] = get_quantizer_from_config(keras_layer, 'bias')
+    else:
+        layer['bias_quantizer'] = None
+
+    return layer, output_shape
+
+
+@keras_handler('QActivation')
+def parse_qactivation_layer(keras_layer, input_names, input_shapes, data_reader):
+    assert keras_layer['class_name'] == 'QActivation'
+    supported_activations = [
+        'quantized_relu',
+        'quantized_tanh',
+        'binary_tanh',
+        'ternary_tanh',
+        'quantized_sigmoid',
+        'quantized_bits',
+        'binary',
+        'ternary',
+    ]
+
+    layer = parse_default_keras_layer(keras_layer, input_names)
+
+    activation_config = keras_layer['config']['activation']
+    quantizer_obj = get_quantizer(activation_config)
+    activation_config = {}
+    # some activations are classes
+    if hasattr(quantizer_obj, 'get_config'):
+        activation_config['class_name'] = quantizer_obj.__class__.__name__
+        if activation_config['class_name'] == 'ternary' or activation_config['class_name'] == 'binary':
+            activation_config['class_name'] += '_tanh'
+        activation_config['config'] = quantizer_obj.get_config()
+    # some activation quantizers are just functions with no config
+    else:
+        activation_config['config'] = {}
+        if 'binary' in quantizer_obj.__name__:
+            activation_config['class_name'] = 'binary_tanh'
+            activation_config['config']['bits'] = 1
+            activation_config['config']['integer'] = 1
+        elif 'ternary' in quantizer_obj.__name__:
+            activation_config['class_name'] = 'ternary_tanh'
+            activation_config['config']['bits'] = 2
+            activation_config['config']['integer'] = 2
+        else:
+            activation_config['class_name'] = 'unknown'
+
+    if activation_config['class_name'] not in supported_activations:
+        raise Exception('Unsupported QKeras activation: {}'.format(activation_config['class_name']))
+
+    if activation_config['class_name'] == 'quantized_bits':
+        activation_config['class_name'] = 'linear'
+
+    if activation_config['class_name'] == 'ternary_tanh':
+        layer['class_name'] = 'TernaryTanh'
+        layer['threshold'] = activation_config.get('config', {}).get('threshold', 0.33)
+        if layer['threshold'] is None:
+            layer['threshold'] = 0.33  # the default ternary tanh threshold for QKeras
+        layer['activation'] = 'ternary_tanh'
+    elif (
+        activation_config['class_name'] == 'quantized_sigmoid'
+        and not activation_config['config'].get('use_real_sigmoid', False)
+    ) or (
+        activation_config['class_name'] == 'quantized_tanh' and not activation_config['config'].get('use_real_tanh', False)
+    ):
+        layer['class_name'] = 'HardActivation'
+        layer['slope'] = 0.5  # the default values in QKeras
+        layer['shift'] = 0.5
+        # Quartus seems to have trouble if the width is 1.
+        layer['slope_prec'] = FixedPrecisionType(width=2, integer=0, signed=False)
+        layer['shift_prec'] = FixedPrecisionType(width=2, integer=0, signed=False)
+        layer['activation'] = activation_config['class_name'].replace('quantized_', 'hard_')
+    else:
+        layer['class_name'] = 'Activation'
+        layer['activation'] = activation_config['class_name'].replace('quantized_', '')
+
+    layer['activation_quantizer'] = activation_config
+    return layer, [shape for shape in input_shapes[0]]
+
+
+@keras_handler('QBatchNormalization')
+def parse_qbatchnorm_layer(keras_layer, input_names, input_shapes, data_reader):
+
+    layer, output_shape = parse_batchnorm_layer(keras_layer, input_names, input_shapes, data_reader)
+
+    layer['mean_quantizer'] = get_quantizer_from_config(keras_layer, 'mean')
+    layer['variance_quantizer'] = get_quantizer_from_config(keras_layer, 'variance')
+    layer['beta_quantizer'] = get_quantizer_from_config(keras_layer, 'beta')
+    layer['gamma_quantizer'] = get_quantizer_from_config(keras_layer, 'gamma')
+
+    return layer, output_shape
+
+
+@keras_handler('QConv2DBatchnorm')
+def parse_qconv2dbatchnorm_layer(keras_layer, input_names, input_shapes, data_reader):
+    intermediate_shape = list()
+    conv_layer, shape_qconv = parse_qconv_layer(keras_layer, input_names, input_shapes, data_reader)
+    intermediate_shape.append(shape_qconv)
+    temp_shape = intermediate_shape
+    batch_layer, out_shape = parse_batchnorm_layer(keras_layer, input_names, temp_shape, data_reader)
+    return {**conv_layer, **batch_layer}, out_shape
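For reference, a small QKeras model that exercises the new handlers above (QDense, QActivation, QBatchNormalization), converted through the usual hls4ml public API. This is a sketch and not part of the commit; the quantizer widths, layer sizes and output directory are arbitrary choices.

import tensorflow.keras as keras
from qkeras import QActivation, QBatchNormalization, QDense, quantized_bits, quantized_relu

import hls4ml

model = keras.Sequential([
    keras.Input(shape=(16,)),
    QDense(32, kernel_quantizer=quantized_bits(6, 0, alpha=1), bias_quantizer=quantized_bits(6, 0, alpha=1)),
    QBatchNormalization(),
    QActivation(quantized_relu(6)),  # handled by parse_qactivation_layer above
    QDense(5, kernel_quantizer=quantized_bits(6, 0, alpha=1), bias_quantizer=quantized_bits(6, 0, alpha=1)),
])

config = hls4ml.utils.config_from_keras_model(model, granularity='name')
hls_model = hls4ml.converters.convert_from_keras_model(model, hls_config=config, output_dir='hls4ml_prj_qkeras')
hls_model.compile()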
