Skip to content

Update inplace variables to not lose connection when source node is optimized. #525

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Closed
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
54 changes: 30 additions & 24 deletions hls4ml/backends/fpga/fpga_types.py
Original file line number Diff line number Diff line change
Expand Up @@ -190,6 +190,14 @@ class QuartusArrayVariableDefinition(VariableDefinition):
def definition_cpp(self, name_suffix='', as_reference=False):
return '{type} {name}{suffix}[{shape}] {pragma}'.format(type=self.type.name, name=self.cppname, suffix=name_suffix, shape=self.size_cpp(), pragma=self.pragma)

class VivadoInplaceArrayVariableDefinition(VariableDefinition):
    def definition_cpp(self, name_suffix='', as_reference=False):
        """Emit a C++ reference aliasing this variable to its source array.

        Inplace variables allocate no storage of their own: the definition
        binds this variable's name to the array of the variable it links to
        (``self.input_var``). ``name_suffix`` and ``as_reference`` are
        accepted for signature consistency with the other VariableDefinition
        subclasses (which callers invoke uniformly) but do not affect the
        emitted alias.
        """
        return f'auto& {self.cppname} = {self.input_var.cppname}'

class QuartusInplaceArrayVariableDefinition(VariableDefinition):
    def definition_cpp(self, name_suffix='', as_reference=False):
        """Emit a C++ reference aliasing this variable to its source array.

        No storage is allocated; the generated definition makes this
        variable's name a reference to ``self.input_var``'s array.
        ``name_suffix`` and ``as_reference`` are accepted so the signature
        matches the other VariableDefinition subclasses, but they have no
        effect on the emitted alias.
        """
        return f'auto& {self.cppname} = {self.input_var.cppname}'

class ArrayVariableConverter(object):
def __init__(self, type_converter, prefix, definition_cls):
self.type_converter = type_converter
Expand All @@ -214,6 +222,14 @@ class QuartusArrayVariableConverter(ArrayVariableConverter):
def __init__(self, type_converter):
super().__init__(type_converter=type_converter, prefix='Quartus', definition_cls=QuartusArrayVariableDefinition)

class VivadoInplaceArrayVariableConverter(ArrayVariableConverter):
    """Array-variable converter that produces Vivado inplace (aliasing) variables."""

    def __init__(self, type_converter):
        super().__init__(
            type_converter=type_converter,
            prefix='Vivado',
            definition_cls=VivadoInplaceArrayVariableDefinition,
        )

class QuartusInplaceArrayVariableConverter(ArrayVariableConverter):
    """Array-variable converter that produces Quartus inplace (aliasing) variables."""

    def __init__(self, type_converter):
        super().__init__(
            type_converter=type_converter,
            prefix='Quartus',
            definition_cls=QuartusInplaceArrayVariableDefinition,
        )

#endregion

#region StructMemberVariable
Expand Down Expand Up @@ -258,6 +274,10 @@ def definition_cpp(self, name_suffix='', as_reference=False):
else: # Declaration
return 'hls::stream<{type}> {name}{suffix}("{name}")'.format(type=self.type.name, name=self.cppname, suffix=name_suffix)

class VivadoInplaceStreamVariableDefinition(VariableDefinition):
    def definition_cpp(self, name_suffix='', as_reference=False):
        """Emit a C++ reference aliasing this stream to its source stream.

        An inplace stream declares no hls::stream of its own; the definition
        binds this variable's name to ``self.input_var``'s stream.
        ``name_suffix`` and ``as_reference`` are accepted for signature
        consistency with the other stream VariableDefinition subclasses but
        do not change the emitted alias.
        """
        return f'auto& {self.cppname} = {self.input_var.cppname}'

class StreamVariableConverter(object):
def __init__(self, type_converter, prefix, definition_cls):
self.type_converter = type_converter
Expand All @@ -276,38 +296,24 @@ def convert(self, tensor_var, n_pack=1, depth=0):
tensor_var.__class__ = type(self.prefix + 'StreamVariable', (type(tensor_var), self.definition_cls), {})
return tensor_var

class VivadoStreamVariableConverter(StreamVariableConverter):
def __init__(self, type_converter):
super().__init__(type_converter=type_converter, prefix='Vivado', definition_cls=VivadoStreamVariableDefinition)

#endregion

#region InplaceVariable

class InplaceVariableConverter(object):
def __init__(self, type_converter, prefix):
self.type_converter = type_converter
self.prefix = prefix

def convert(self, tensor_var, io_type):
if tensor_var.__class__.__name__.startswith(self.prefix): # Already converted
class InplaceStreamVariableConverter(StreamVariableConverter):
def convert(self, tensor_var, n_pack=1, depth=0):
if isinstance(tensor_var, self.definition_cls): # Already converted
return tensor_var

if io_type == 'io_stream':
tensor_var.type = self.type_converter.convert(PackedType(tensor_var.type.name, tensor_var.type.precision, tensor_var.shape[-1], n_pack=1))
else:
tensor_var.type = self.type_converter.convert(tensor_var.type)
tensor_var.pragma = None
tensor_var.type = self.type_converter.convert(PackedType(tensor_var.type.name, tensor_var.type.precision, tensor_var.input_var.shape[-1], n_pack))

tensor_var.__class__ = type(self.prefix + 'InplaceVariable', (type(tensor_var),), {})
tensor_var.__class__ = type(self.prefix + 'StreamVariable', (type(tensor_var), self.definition_cls), {})
return tensor_var

class VivadoInplaceVariableConverter(InplaceVariableConverter):
class VivadoStreamVariableConverter(StreamVariableConverter):
def __init__(self, type_converter):
super().__init__(type_converter=type_converter, prefix='Vivado')
super().__init__(type_converter=type_converter, prefix='Vivado', definition_cls=VivadoStreamVariableDefinition)

class QuartusInplaceVariableConverter(InplaceVariableConverter):
class VivadoInplaceStreamVariableConverter(InplaceStreamVariableConverter):
def __init__(self, type_converter):
super().__init__(type_converter=type_converter, prefix='Quartus')
super().__init__(type_converter=type_converter, prefix='Vivado', definition_cls=VivadoInplaceStreamVariableDefinition)

#endregion

Expand Down
9 changes: 3 additions & 6 deletions hls4ml/backends/quartus/passes/transform_types.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,8 @@

from hls4ml.model.optimizer import GlobalOptimizerPass
from hls4ml.model.types import InplaceVariable
from hls4ml.backends.fpga.fpga_types import ACTypeConverter, QuartusArrayVariableConverter, HLSTypeConverter, QuartusInplaceVariableConverter, QuartusStructMemberVariableConverter, StaticWeightVariableConverter
from hls4ml.backends.fpga.fpga_types import (
ACTypeConverter, QuartusArrayVariableConverter, HLSTypeConverter,
QuartusStructMemberVariableConverter, StaticWeightVariableConverter)


class TransformTypes(GlobalOptimizerPass):
Expand All @@ -10,15 +11,11 @@ def __init__(self):
self.array_var_converter = QuartusArrayVariableConverter(type_converter=self.type_converter)
self.struct_var_converter = QuartusStructMemberVariableConverter(type_converter=self.type_converter)
self.weight_var_converter = StaticWeightVariableConverter(type_converter=self.type_converter)
self.inplace_var_converter = QuartusInplaceVariableConverter(type_converter=self.type_converter)

def transform(self, model, node):
io_type = node.model.config.get_config_value('IOType')

for out_name, var in node.variables.items():
if isinstance(var, InplaceVariable):
new_var = self.inplace_var_converter.convert(var, io_type)

if io_type == 'io_stream':
raise Exception('Streaming IO is not supported in Quartus.')
elif io_type == 'io_parallel':
Expand Down
2 changes: 1 addition & 1 deletion hls4ml/backends/template.py
Original file line number Diff line number Diff line change
Expand Up @@ -45,7 +45,7 @@ def _default_config_params(self, layer):
return params

class FunctionCallTemplate(Template):
def __init__(self, layer_class, include_header=None):
def __init__(self, layer_class, include_header=[]):
if isinstance(layer_class, (list, tuple, set)):
name = '_'.join([cls.__name__.lower() for cls in layer_class])
else:
Expand Down
36 changes: 36 additions & 0 deletions hls4ml/backends/vivado/passes/inplace_reshape.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,36 @@
from hls4ml.model.optimizer import OptimizerPass
from hls4ml.model.layers import Reshape
from hls4ml.model.types import InplaceTensorVariable

class InplaceParallelReshape(OptimizerPass):
    """Replace Reshape outputs with inplace aliases under io_parallel.

    With io_parallel, arrays are stored flat (1D), so a reshape needs no
    generated code: the output variable can simply alias the input variable.
    """

    def match(self, node):
        return isinstance(node, Reshape)

    def transform(self, model, node):
        # Only applies to io_parallel; leave other IO types untouched.
        if model.config.get_config_value('IOType') != 'io_parallel':
            return False

        original_out = node.get_output_variable()
        source = node.get_input_variable(node.inputs[0])
        node.set_attr(node.outputs[0], InplaceTensorVariable(original_out, source))
        return False

class InplaceStreamFlatten(OptimizerPass):
    """Remove Flatten layers under io_stream by aliasing output to input."""

    def match(self, node):
        # A Reshape whose output is one-dimensional is a Flatten.
        return isinstance(node, Reshape) and len(node.get_output_variable().shape) == 1

    def transform(self, model, node):
        # Only applies to io_stream; leave other IO types untouched.
        if model.config.get_config_value('IOType') != 'io_stream':
            return False

        original_out = node.get_output_variable()
        source = node.get_input_variable(node.inputs[0])
        node.set_attr(node.outputs[0], InplaceTensorVariable(original_out, source))
        return False
20 changes: 14 additions & 6 deletions hls4ml/backends/vivado/passes/transform_types.py
Original file line number Diff line number Diff line change
@@ -1,30 +1,38 @@

from numpy import isin
from hls4ml.model.optimizer import GlobalOptimizerPass
from hls4ml.model.types import InplaceVariable
from hls4ml.backends.fpga.fpga_types import APTypeConverter, HLSTypeConverter, StaticWeightVariableConverter, VivadoArrayVariableConverter, VivadoInplaceVariableConverter, VivadoStreamVariableConverter
from hls4ml.backends.fpga.fpga_types import (
APTypeConverter, HLSTypeConverter, StaticWeightVariableConverter,
VivadoArrayVariableConverter, VivadoInplaceArrayVariableConverter,
VivadoStreamVariableConverter, VivadoInplaceStreamVariableConverter)
from hls4ml.model.types import InplaceTensorVariable


class TransformTypes(GlobalOptimizerPass):
def __init__(self):
self.type_converter = HLSTypeConverter(precision_converter=APTypeConverter())
self.array_var_converter = VivadoArrayVariableConverter(type_converter=self.type_converter)
self.inplace_array_var_converter = VivadoInplaceArrayVariableConverter(type_converter=self.type_converter)
self.stream_var_converter = VivadoStreamVariableConverter(type_converter=self.type_converter)
self.inplace_stream_var_converter = VivadoInplaceStreamVariableConverter(type_converter=self.type_converter)
self.weight_var_converter = StaticWeightVariableConverter(type_converter=self.type_converter)
self.inplace_var_converter = VivadoInplaceVariableConverter(type_converter=self.type_converter)

def transform(self, model, node):
io_type = node.model.config.get_config_value('IOType')

for out_name, var in node.variables.items():
if isinstance(var, InplaceVariable):
new_var = self.inplace_var_converter.convert(var, io_type)
if io_type == 'io_stream':
new_var = self.stream_var_converter.convert(var)
if isinstance(var, InplaceTensorVariable):
new_var = self.inplace_stream_var_converter.convert(var)
else:
new_var = self.stream_var_converter.convert(var)
elif io_type == 'io_serial':
new_var = self.array_var_converter.convert(var, pragma='stream')
elif io_type == 'io_parallel':
if node.name in node.model.inputs:
new_var = self.array_var_converter.convert(var, pragma='reshape')
elif isinstance(var, InplaceTensorVariable):
new_var = self.inplace_array_var_converter.convert(var, pragma='')
else:
new_var = self.array_var_converter.convert(var, pragma='partition')
else:
Expand Down
2 changes: 2 additions & 0 deletions hls4ml/backends/vivado/vivado_backend.py
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,8 @@ def _register_flows(self):

optimization_passes = [
'vivado:optimize_pointwise_conv',
'vivado:inplace_parallel_reshape',
'vivado:inplace_stream_flatten',
]
optimization_flow = register_flow('optimize', optimization_passes, requires=[init_flow], backend=self.name)

Expand Down
6 changes: 3 additions & 3 deletions hls4ml/model/attributes.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
from collections.abc import MutableMapping

from hls4ml.model.types import InplaceVariable, NamedType, TensorVariable, WeightVariable
from hls4ml.model.types import NamedType, TensorVariable, WeightVariable

class Attribute(object):
def __init__(self, name, value_type=int, default=None, configurable=False):
Expand Down Expand Up @@ -57,7 +57,7 @@ def __iter__(self):
yield key

def __setitem__(self, key, value):
if isinstance(value, (TensorVariable, InplaceVariable)):
if isinstance(value, TensorVariable):
self.layer.model.register_output_variable(key, value)
self.attributes['result_t'] = value.type
if key in self._expected_attributes and key in self.layer.outputs:
Expand Down Expand Up @@ -98,7 +98,7 @@ def __init__(self, attributes):

class VariableMapping(AttributeMapping):
def __init__(self, attributes):
super().__init__(attributes, (TensorVariable, InplaceVariable))
super().__init__(attributes, TensorVariable)

def __getitem__(self, key):
if 'out_' + key in self.attributes:
Expand Down
8 changes: 2 additions & 6 deletions hls4ml/model/layers.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
import six

from hls4ml.model.types import NamedType
from hls4ml.model.types import TensorVariable, WeightVariable, CompressedWeightVariable, ExponentWeightVariable, InplaceVariable
from hls4ml.model.types import TensorVariable, WeightVariable, CompressedWeightVariable, ExponentWeightVariable
from hls4ml.model.types import IntegerPrecisionType, FixedPrecisionType, ExponentPrecisionType
from hls4ml.model.types import find_minimum_width

Expand Down Expand Up @@ -271,11 +271,7 @@ def initialize(self):
shape = shape[1:]
dims = ['N_SIZE_{}_{}'.format(i, self.index) for i in range(1, len(shape) + 1)]

out_name = self.outputs[0]
proxy = self.get_input_variable()
out = InplaceVariable(shape, dims, proxy)

self.set_attr(out_name, out)
self.add_output_variable(shape, dims)

class Dense(Layer):
_expected_attributes = [
Expand Down
26 changes: 10 additions & 16 deletions hls4ml/model/types.py
Original file line number Diff line number Diff line change
Expand Up @@ -207,22 +207,16 @@ def size_cpp(self):
#TODO get rid of size_cpp() (and dim_names)
return '*'.join([str(k) for k in self.dim_names])

class InplaceVariable(Variable):
def __init__(self, shape, dim_names, proxy):
self.shape = shape
self.dim_names = dim_names
self.type = proxy.type
self.name = proxy.name
self.size = proxy.size

def get_shape(self):
return zip(self.dim_names, self.shape)

def size_cpp(self):
return '*'.join([str(k) for k in self.dim_names])

def definition_cpp(self, name_suffix='', as_reference=False):
return None
class InplaceTensorVariable(TensorVariable):
    '''A TensorVariable that is just a link to another.'''

    def __init__(self, tv, input_var):
        '''Build from an existing TensorVariable ``tv``, linked to ``input_var``.

        Every attribute of ``tv`` is copied onto this instance; the type is
        then taken from the linked variable so both share one representation,
        and the link itself is kept in ``self.input_var``.
        '''
        self.__dict__.update(vars(tv))
        self.type = input_var.type
        self.input_var = input_var

class WeightVariable(Variable):
def __init__(self, var_name, type_name, precision, data, quantizer=None, **kwargs):
Expand Down
36 changes: 36 additions & 0 deletions test/pytest/test_reshape.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,36 @@
""" Test that reshape is properly handled by optimizers.
"""

import pytest
import hls4ml
import tensorflow as tf
import numpy as np
from tensorflow.keras import optimizers
from tensorflow.keras.layers import Input, Dense, Reshape, Softmax


def test_reshape_parallel():
    """Reshape under io_parallel should convert and compile cleanly."""
    model = tf.keras.models.Sequential([
        # (10) is just the int 10 — a 1-tuple (10,) is the intended shape
        tf.keras.layers.Input((10,)),
        tf.keras.layers.Dense(10 * 3),
        tf.keras.layers.Reshape((10, 3)),
        tf.keras.layers.ReLU()
    ])
    model.compile(optimizer='adam', loss='mse')
    config = hls4ml.utils.config_from_keras_model(model)
    output_dir = 'hls4mlprj_reshape_parallel'
    hls_model = hls4ml.converters.convert_from_keras_model(model, hls_config=config, output_dir=output_dir)
    # Compiling exercises the optimizer passes and the emitted inplace alias.
    hls_model.compile()

def test_reshape_stream():
    """Reshape (Flatten removal) under io_stream should convert and compile cleanly."""
    model = tf.keras.models.Sequential([
        # (10) is just the int 10 — a 1-tuple (10,) is the intended shape
        tf.keras.layers.Input((10,)),
        tf.keras.layers.Dense(10 * 3),
        tf.keras.layers.Reshape((10, 3)),
        tf.keras.layers.ReLU()
    ])
    model.compile(optimizer='adam', loss='mse')
    config = hls4ml.utils.config_from_keras_model(model)
    output_dir = 'hls4mlprj_reshape_stream'
    hls_model = hls4ml.converters.convert_from_keras_model(
        model, hls_config=config, output_dir=output_dir, io_type='io_stream')
    # Compiling exercises the optimizer passes and the emitted inplace alias.
    hls_model.compile()