Changes to ingest-qonnx #461

Merged (14 commits) on Dec 2, 2021
Changes from all commits
2 changes: 1 addition & 1 deletion hls4ml/converters/__init__.py
@@ -93,7 +93,7 @@ def construct_keras_model(loader, node):

print('Loading configuration from', config_file)
with open(config_file, 'r') as file:
parsed_config = yaml.load(file, Loader=yaml.SafeLoader)
parsed_config = yaml.safe_load(file)
return parsed_config

def convert_from_config(config):
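For reference, the one-line change above is behavior-preserving: `yaml.safe_load(file)` is shorthand for `yaml.load(file, Loader=yaml.SafeLoader)`. A minimal sketch of the equivalence (the config keys below are illustrative, not tied to hls4ml's schema):

```python
import yaml

# Illustrative config text; the keys are hypothetical.
config_text = "ProjectName: myproject\nIOType: io_parallel\n"

# safe_load is the concise spelling of load(..., Loader=yaml.SafeLoader);
# both refuse to construct arbitrary Python objects from the YAML.
a = yaml.load(config_text, Loader=yaml.SafeLoader)
b = yaml.safe_load(config_text)
assert a == b == {'ProjectName': 'myproject', 'IOType': 'io_parallel'}
```
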
10 changes: 5 additions & 5 deletions hls4ml/converters/keras/core.py
@@ -86,18 +86,18 @@ def parse_activation_layer(keras_layer, input_names, input_shapes, data_reader,
if layer['class_name'] != 'Activation':
layer['activation'] = layer['class_name']
if layer['class_name'] == 'LeakyReLU':
layer['activ_param'] = keras_layer["config"].get('alpha', 0.3)
layer['activ_param'] = keras_layer['config'].get('alpha', 0.3)
elif layer['class_name'] == 'ThresholdedReLU':
layer['activ_param'] = keras_layer["config"].get('theta', 1.)
layer['activ_param'] = keras_layer['config'].get('theta', 1.)
elif layer['class_name'] == 'ELU':
layer['activ_param'] = keras_layer["config"].get('alpha', 1.)
layer['activ_param'] = keras_layer['config'].get('alpha', 1.)
elif layer['class_name'] == 'ReLU':
layer['class_name'] = 'Activation'

if layer['class_name'] == 'Activation' and layer['activation'] == 'softmax':
layer['class_name'] = 'Softmax'
if layer['class_name'] == 'ReLU':
layer['class_name'] = 'Activation'
if layer['class_name'] == 'Softmax':
layer['axis'] = keras_layer['config'].get('axis', -1)

return layer, [shape for shape in input_shapes[0]]

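A side note on where the defaults above come from: Keras stores these parameters in the serialized layer config, which is what `keras_layer['config'].get(...)` reads back. A hedged sketch (assuming TensorFlow 2.x; the printed values are the library defaults):

```python
import tensorflow as tf

# The softmax axis and LeakyReLU alpha live in the layer config,
# mirroring the .get('axis', -1) and .get('alpha', 0.3) fallbacks above.
print(tf.keras.layers.Softmax().get_config()['axis'])     # -1
print(tf.keras.layers.LeakyReLU().get_config()['alpha'])  # 0.3
```
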
1 change: 1 addition & 0 deletions hls4ml/converters/keras_to_hls.py
@@ -319,6 +319,7 @@ def keras_to_hls(config):
act_layer['class_name'] = layer['activation']
elif layer['activation'] == 'softmax':
act_layer['class_name'] = 'Softmax'
act_layer['axis'] = -1
else:
act_layer['class_name'] = 'Activation'
inputs_map[layer['name']] = act_layer['name']
5 changes: 4 additions & 1 deletion hls4ml/converters/onnx/core.py
@@ -33,7 +33,10 @@ def parse_matmul_layer(reader, node, inputs_map, input_shapes, graph, config):
return layer

#------------------Global paras for activations
activation_layers = ['Relu', 'Tanh', 'Sigmoid', 'LeakyRelu', 'ThresholdedRelu', 'HardSigmoid', 'Elu', 'Selu', 'PRelu', 'Softmax', 'Softsign', 'Softplus', 'Clip']
# TODO: repair HardSigmoid support
# https://github.com/fastmachinelearning/hls4ml/issues/409
#activation_layers = ['Relu', 'Tanh', 'Sigmoid', 'LeakyRelu', 'ThresholdedRelu', 'HardSigmoid', 'Elu', 'Selu', 'PRelu', 'Softmax', 'Softsign', 'Softplus', 'Clip']
activation_layers = ['Relu', 'Tanh', 'Sigmoid', 'LeakyRelu', 'ThresholdedRelu', 'Elu', 'Selu', 'PRelu', 'Softmax', 'Softsign', 'Softplus', 'Clip']

activation_map = {'Relu':'ReLU', 'Tanh':'Activation',
'Sigmoid':'Activation', 'LeakyRelu':'LeakyReLU',
9 changes: 7 additions & 2 deletions hls4ml/converters/pytorch/core.py
@@ -2,6 +2,8 @@

from hls4ml.converters.pytorch_to_hls import pytorch_handler

# TODO: propagate use_bias info properly
# https://github.com/fastmachinelearning/hls4ml/issues/409
@pytorch_handler('Linear')
def parse_linear_layer(pytorch_layer, layer_name, input_shapes, data_reader, config):
assert('Linear' in pytorch_layer.__class__.__name__)
@@ -15,6 +17,7 @@ def parse_linear_layer(pytorch_layer, layer_name, input_shapes, data_reader, con
layer['n_out'] = pytorch_layer.out_features

#Handling whether bias is used or not
assert not pytorch_layer.bias is None, "PyTorch Linear with bias=False not yet supported"
if pytorch_layer.bias is None:
layer['use_bias'] = False
else:
@@ -24,8 +27,10 @@ def parse_linear_layer(pytorch_layer, layer_name, input_shapes, data_reader, con

return layer, output_shape


activation_layers = ['LeakyReLU', 'ThresholdedReLU', 'ELU', 'PReLU', 'Softmax', 'ReLU']
# TODO: propagate parametrized activation parameters
# https://github.com/fastmachinelearning/hls4ml/issues/409
# activation_layers = ['LeakyReLU', 'ThresholdedReLU', 'ELU', 'PReLU', 'Softmax', 'ReLU']
activation_layers = ['Softmax', 'ReLU']
@pytorch_handler(*activation_layers)
def parse_activation_layer(pytorch_layer, layer_name, input_shapes, data_reader, config):

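The added assertion turns a silently mis-parsed layer into an explicit failure. A minimal sketch of what is accepted versus rejected (assuming PyTorch is installed; sizes are illustrative):

```python
import torch.nn as nn

supported = nn.Linear(16, 4)                # bias=True by default; parsed as before
unsupported = nn.Linear(16, 4, bias=False)  # .bias is None; now trips the new assert

print(supported.bias is None)    # False
print(unsupported.bias is None)  # True -> "PyTorch Linear with bias=False not yet supported"
```
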
3 changes: 3 additions & 0 deletions hls4ml/model/hls_layers.py
@@ -1503,6 +1503,9 @@ def initialize(self):
self.set_attr('implementation', 'latency')
else:
self.set_attr('implementation', self.model.config.get_strategy(self).lower())

if self.model.config.get_config_value('IOType') == 'io_parallel':
assert len(self.get_input_variable().shape) == 1, 'Softmax with io_parallel strategy cannot be used on multidimensional tensors.'

class TernaryTanh(Activation):
def initialize(self):
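The new check rejects io_parallel softmax on anything but a 1-D input. One way to stay within the constraint (a suggestion on the modelling side, not something this PR enforces) is to flatten before the softmax; a hedged Keras sketch with illustrative shapes:

```python
import tensorflow as tf

# Softmax over an (8, 8, 3) tensor would fail the io_parallel assertion;
# flattening first keeps the softmax input one-dimensional.
model = tf.keras.Sequential([
    tf.keras.layers.Flatten(input_shape=(8, 8, 3)),
    tf.keras.layers.Dense(10),
    tf.keras.layers.Softmax(),
])
```
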
40 changes: 22 additions & 18 deletions hls4ml/model/hls_model.py
@@ -557,22 +557,22 @@ def _get_top_function(self, x):
xlist = [x]
else:
xlist = x

for x in xlist:
if not isinstance(x, np.ndarray):
for xi in xlist:
if not isinstance(xi, np.ndarray):
raise Exception('Expected numpy.ndarray, but got {}'.format(type(x)))
if not x.flags['C_CONTIGUOUS']:
if not xi.flags['C_CONTIGUOUS']:
raise Exception('Array must be c_contiguous, try using numpy.ascontiguousarray(x)')

x = xlist[0]
if x.dtype in [np.single, np.float32]:
x0 = xlist[0]
if x0.dtype in [np.single, np.float32]:
top_function = getattr(self._top_function_lib, self.config.get_project_name() + '_float')
ctype = ctypes.c_float
elif x.dtype in [np.double, np.float64, np.float_]:
elif x0.dtype in [np.double, np.float64, np.float_]:
top_function = getattr(self._top_function_lib, self.config.get_project_name() + '_double')
ctype = ctypes.c_double
else:
raise Exception('Invalid type ({}) of numpy array. Supported types are: single, float32, double, float64, float_.'.format(x.dtype))
raise Exception('Invalid type ({}) of numpy array. Supported types are: single, float32, double, float64, float_.'.format(x0.dtype))


top_function.restype = None
@@ -587,9 +587,9 @@ def _compute_n_samples(self, x):
else:
xlist = x
n_samples = []
for i, x in enumerate(xlist):
for i, xi in enumerate(xlist):
expected_size = self.get_input_variables()[i].size()
x_size = np.prod(x.shape)
x_size = np.prod(xi.shape)
n_sample, rem = divmod(x_size, expected_size)
if rem != 0:
raise Exception('Input size mismatch, got {}, expected {}'.format(x_size.shape, self.get_input_variables()[i].shape))
@@ -603,23 +603,25 @@ def _compute_n_samples(self, x):
def predict(self, x):
top_function, ctype = self._get_top_function(x)
n_samples = self._compute_n_samples(x)
n_inputs = len(self.get_input_variables())

curr_dir = os.getcwd()
os.chdir(self.config.get_output_dir() + '/firmware')

output = []
if n_samples == 1:
if n_samples == 1 and n_inputs == 1:
x = [x]

try:
for i in range(n_samples):
predictions = np.zeros(self.get_output_variables()[0].size(), dtype=ctype)
if len(self.get_input_variables()) == 1:
if n_inputs == 1:
top_function(x[i], predictions, ctypes.byref(ctypes.c_ushort()), ctypes.byref(ctypes.c_ushort()))
else:
argtuple = [xi for xi in x[i]]
inp = [xj[i] for xj in x]
argtuple = inp
argtuple += [predictions]
argtuple += [ctypes.byref(ctypes.c_ushort()) for i in range(len(x[i])+1)]
argtuple += [ctypes.byref(ctypes.c_ushort()) for k in range(len(inp)+1)]
argtuple = tuple(argtuple)
top_function(*argtuple)
output.append(predictions)
@@ -642,6 +644,7 @@ def trace(self, x):

top_function, ctype = self._get_top_function(x)
n_samples = self._compute_n_samples(x)
n_inputs = len(self.get_input_variables())

class TraceData(ctypes.Structure):
_fields_ = [('name', ctypes.c_char_p),
@@ -673,20 +676,21 @@ class TraceData(ctypes.Structure):
os.chdir(self.config.get_output_dir() + '/firmware')

output = []
if n_samples == 1:
if n_samples == 1 and n_inputs == 1:
x = [x]

try:
alloc_func(ctypes.sizeof(ctype))

for i in range(n_samples):
predictions = np.zeros(self.get_output_variables()[0].size(), dtype=ctype)
if len(self.get_input_variables()) == 1:
if n_inputs == 1:
top_function(x[i], predictions, ctypes.byref(ctypes.c_ushort()), ctypes.byref(ctypes.c_ushort()))
else:
argtuple = [xi for xi in x[i]]
inp = [xj[i] for xj in x]
argtuple = inp
argtuple += [predictions]
argtuple += [ctypes.byref(ctypes.c_ushort()) for i in range(len(x[i])+1)]
argtuple += [ctypes.byref(ctypes.c_ushort()) for k in range(len(inp)+1)]
argtuple = tuple(argtuple)
top_function(*argtuple)
output.append(predictions)
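With the n_inputs bookkeeping above, predict and trace now distinguish "one sample" from "one input": for multi-input networks, x is expected to be a list with one array per model input, and `inp = [xj[i] for xj in x]` picks sample i out of every array. A hedged usage sketch (hls_model and the shapes are illustrative; the model is assumed to be already compiled):

```python
import numpy as np

# Two model inputs, three samples each: one contiguous array per input.
x1 = np.ascontiguousarray(np.random.rand(3, 16).astype(np.float32))
x2 = np.ascontiguousarray(np.random.rand(3, 4).astype(np.float32))

# hls_model is a compiled hls4ml HLSModel (construction not shown here).
y = hls_model.predict([x1, x2])
```
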
9 changes: 6 additions & 3 deletions hls4ml/model/optimizer/__init__.py
@@ -12,11 +12,11 @@
from hls4ml.model.optimizer.passes.conv_same_pad import InsertZeroPaddingBeforeConv2D
from hls4ml.model.optimizer.passes.pointwise import OptimizePointwiseConv
from hls4ml.model.optimizer.passes.clone import CloneOutput
from hls4ml.model.optimizer.passes.repack_stream import ReshapeStream, BroadcastStream
from hls4ml.model.optimizer.passes.repack_stream import ReshapeStream, BroadcastStream, RemoveFinalReshape
from hls4ml.model.optimizer.passes.transpose_opt import RemoveUselessTranspose
from hls4ml.model.optimizer.passes.multi_dense import ReplaceMultidimensionalDenseWithConv
from hls4ml.model.optimizer.passes.reshape_const import ReshapeConstant
from hls4ml.model.optimizer.passes.quant_opt import QuantConstantParameters, QuantToBatchNorm
from hls4ml.model.optimizer.passes.quant_opt import QuantConstantParameters, QuantFactorizeScale, QuantToActivation, QuantToConstant
from hls4ml.model.optimizer.passes.batchnorm_opt import BatchNormConstantParameters, ConstantBatchNormMerging, FuseConsecutiveBatchNormalization
from hls4ml.model.optimizer.passes.merge_const import MergeTwoConstant, MergeToBatchNormalization, MergeToBatchNormalizationDiv
from hls4ml.model.optimizer.passes.matmul_const_to_dense import MatmulConstToDense
@@ -40,7 +40,9 @@

register_pass('reshape_constant', ReshapeConstant)
register_pass('quant_constant_params', QuantConstantParameters)
register_pass('quant_to_batchnorm', QuantToBatchNorm)
register_pass('quant_factorize_scale', QuantFactorizeScale)
register_pass('quant_to_activation', QuantToActivation)
register_pass('quant_to_constant', QuantToConstant)
register_pass('batch_norm_constant_parameters', BatchNormConstantParameters)
register_pass('fuse_consecutive_base_batch_normalizations', FuseConsecutiveBatchNormalization)
register_pass('constant_batch_norm_fusion', ConstantBatchNormMerging)
@@ -58,6 +60,7 @@
register_pass('conv2d_same_pad', InsertZeroPaddingBeforeConv2D)
register_pass('optimize_pointwise_conv', OptimizePointwiseConv)
register_pass('clone_output', CloneOutput)
register_pass('remove_final_reshape', RemoveFinalReshape)
register_pass('reshape_stream', ReshapeStream)
register_pass('remove_useless_transpose', RemoveUselessTranspose)
register_pass('replace_multidense_conv', ReplaceMultidimensionalDenseWithConv)
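For context, RemoveFinalReshape and the new quant passes hook into the same registry as everything above. A hedged sketch of registering a custom pass, assuming the match/transform interface of OptimizerPass used by the imported passes (the pass itself is purely illustrative):

```python
from hls4ml.model.optimizer import OptimizerPass, register_pass

class DropDoubleReshape(OptimizerPass):
    """Illustrative pass: collapse a Reshape that feeds directly into another Reshape."""

    def match(self, node):
        return node.__class__.__name__ == 'Reshape' and \
               node.get_input_node().__class__.__name__ == 'Reshape'

    def transform(self, model, node):
        # Remove the upstream Reshape; the graph is rewired around it.
        model.remove_node(node.get_input_node())
        return True  # graph changed, so the optimizer re-runs matching

register_pass('drop_double_reshape', DropDoubleReshape)
```
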