Skip to content

PyTorch.Geometric to HLS4ML #379

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Closed
wants to merge 25 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
25 commits
Select commit Hold shift + click to select a range
4dc4846
pyg_to_hls
abdelabd Jun 13, 2021
5298c9b
added (started) pyg block_handlers
Aug 17, 2021
5be3da9
added pyg graph_handlers
Aug 17, 2021
c4071b9
added 'check_forward_dict'
Aug 17, 2021
1f1e42f
updated error handling
Aug 17, 2021
0d1b8a7
updated naming convention: Aggregate-->EdgeAggregate, aggregate-->edg…
Aug 24, 2021
5568e06
wrong array name in #pragma HLS ARRAY_PARTITION
Aug 24, 2021
eda254f
dict no longer necessary (used in development)
Sep 1, 2021
84637dd
main difference between this branch and 'pyg_to_hls_rebase'
Sep 1, 2021
f105dc3
no longer initialize edge_index, just use edge_index_1D directly
Sep 3, 2021
e8cd189
added docstring to convert_from_pyg_model()
Sep 4, 2021
09af4e3
aesthetics
Sep 4, 2021
fd4cd9f
updated error handling
Sep 17, 2021
54f7ec4
adapting max-aggregation to work for float
Sep 17, 2021
f75901a
improved max-aggregation initialization
Sep 21, 2021
aa2a9de
improved max-aggregation initialization (again)
Sep 21, 2021
a220358
minimized code-copies between EdgeBlock and NodeBlock
Sep 23, 2021
46d5695
removed unnecessary parameters
Sep 25, 2021
cca97c3
add pytest for pyg
jmduarte Dec 27, 2021
4b1ed68
actually add test this time
jmduarte Dec 28, 2021
ca49221
moved handling of get_weights_data() from HLSModel to PyTorchModelReader
Feb 6, 2022
5c36aad
special handling for Pyg dim_names wasn't necessary
Feb 6, 2022
7e7ebf1
If hls4ml_config.yml not dump-able, just raise warning
Feb 6, 2022
bc93393
write_yml fix to delete model.config.config['PytorchModel'] if yaml.d…
Feb 8, 2022
570af9d
try+except-->finally
Feb 8, 2022
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
77 changes: 77 additions & 0 deletions contrib/interaction_network.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,77 @@
import torch
import torch_geometric
from torch import Tensor

import torch.nn as nn
import torch.nn.functional as F
import torch_geometric.transforms as T
from torch_geometric.nn import MessagePassing
from torch.nn import Sequential as Seq, Linear, ReLU, Sigmoid

class RelationalModel(nn.Module):
    """Three-layer MLP used as the relational (edge) network of the
    interaction network: input -> hidden -> hidden -> output, with ReLU
    after the first two Linear layers."""

    def __init__(self, input_size, output_size, hidden_size):
        super(RelationalModel, self).__init__()

        # Built as a list first, then unpacked, so the stage order is
        # explicit; state_dict keys stay positional ("layers.0", ...).
        stages = [
            nn.Linear(input_size, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, output_size),
        ]
        self.layers = nn.Sequential(*stages)

    def forward(self, m):
        """Apply the MLP to the stacked per-edge inputs *m*."""
        return self.layers(m)

class ObjectModel(nn.Module):
    """Three-layer MLP used as the object (node) network of the
    interaction network; same shape as RelationalModel but kept as a
    separate class so the two roles stay distinct."""

    def __init__(self, input_size, output_size, hidden_size):
        super(ObjectModel, self).__init__()

        self.layers = nn.Sequential(
            *[
                nn.Linear(input_size, hidden_size),
                nn.ReLU(),
                nn.Linear(hidden_size, hidden_size),
                nn.ReLU(),
                nn.Linear(hidden_size, output_size),
            ]
        )

    def forward(self, C):
        """Run the MLP on the stacked per-node inputs *C*."""
        return self.layers(C)


class InteractionNetwork(MessagePassing):
    """Interaction-network GNN built on PyG's MessagePassing.

    R1 computes per-edge messages, O computes node updates from the
    aggregated messages, and R2 scores each edge from the updated
    endpoint features. The input sizes below imply node_dim=3 and
    edge_dim=4 — TODO confirm against the intended dataset.
    """

    def __init__(self, aggr='add', flow='source_to_target', hidden_size=40):
        super(InteractionNetwork, self).__init__(aggr=aggr,
                                                 flow=flow)
        # Edge/message MLP: input 10 = 3 (x_i) + 3 (x_j) + 4 (edge_attr); output 4-dim messages.
        self.R1 = RelationalModel(10, 4, hidden_size)
        # Node-update MLP: input 7 = 3 (node features) + 4 (aggregated messages); output 3-dim nodes.
        self.O = ObjectModel(7, 3, hidden_size)
        # Edge classifier: input 10 = 3 + 3 (updated endpoints) + 4 (cached messages); scalar score.
        self.R2 = RelationalModel(10, 1, hidden_size)
        # Cache of the most recent messages from message(); read back in forward().
        self.E: Tensor = Tensor()

    def forward(self, x: Tensor, edge_index: Tensor, edge_attr: Tensor) -> Tensor:
        """Return a sigmoid score in (0, 1) per edge, shape (n_edge, 1)."""

        # propagate() runs message() on every edge, aggregates (aggr='add'
        # by default), then applies update() to get the new node features.
        # propagate_type: (x: Tensor, edge_attr: Tensor)
        x_tilde = self.propagate(edge_index, x=x, edge_attr=edge_attr, size=None)

        # Pick receiver (r) / sender (s) rows of edge_index according to
        # the configured message-passing direction.
        if self.flow == 'source_to_target':
            r = edge_index[1]
            s = edge_index[0]
        else:
            r = edge_index[0]
            s = edge_index[1]

        # Per edge: updated receiver features, updated sender features,
        # and the messages cached by message() during propagate().
        m2 = torch.cat([x_tilde[r],
                        x_tilde[s],
                        self.E], dim=1)
        return torch.sigmoid(self.R2(m2))

    def message(self, x_i, x_j, edge_attr):
        """Compute and cache per-edge messages via R1."""
        # x_i --> incoming
        # x_j --> outgoing
        m1 = torch.cat([x_i, x_j, edge_attr], dim=1)
        # Stored on self so forward() can reuse the messages after propagate().
        self.E = self.R1(m1)
        return self.E

    def update(self, aggr_out, x):
        """Combine original node features with aggregated messages via O."""
        c = torch.cat([x, aggr_out], dim=1)
        return self.O(c)
133 changes: 132 additions & 1 deletion hls4ml/converters/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@
#----------Make converters available if the libraries can be imported----------#
try:
from hls4ml.converters.pytorch_to_hls import pytorch_to_hls, get_supported_pytorch_layers, register_pytorch_layer_handler
from hls4ml.converters.pyg_to_hls import pyg_to_hls, get_supported_pyg_blocks, register_pyg_block_handler
__pytorch_enabled__ = True
except ImportError:
warnings.warn("WARNING: Pytorch converter is not enabled!")
Expand All @@ -31,7 +32,7 @@
__tensorflow_enabled__ = False

#----------Layer handling register----------#
model_types = ['keras', 'pytorch', 'onnx']
model_types = ['keras', 'pytorch', 'onnx', 'pyg']

for model_type in model_types:
for module in os.listdir(os.path.dirname(__file__) + '/{}'.format(model_type)):
Expand All @@ -52,6 +53,8 @@
register_pytorch_layer_handler(layer, func)
elif model_type == 'onnx':
register_onnx_layer_handler(layer, func)
elif model_type == 'pyg':
register_pyg_block_handler(layer, func)

except ImportError:
continue
Expand Down Expand Up @@ -297,6 +300,134 @@ def convert_from_pytorch_model(model, input_shape, output_dir='my-hls-test', pro

return pytorch_to_hls(config)

def check_forward_dict(model, forward_dictionary):
    """Verify that every key in *forward_dictionary* names a submodule of *model*.

    Parameters
    ----------
    model : torch.nn.Module
        Model whose submodules are being checked.
    forward_dictionary : dict
        Mapping of submodule name -> block type (e.g. 'EdgeBlock', 'NodeBlock').

    Raises
    ------
    AttributeError
        If any key does not correspond to an attribute of *model*.
    """
    for key in forward_dictionary:
        # hasattr instead of try/getattr: the module object itself is unused.
        if not hasattr(model, key):
            raise AttributeError(
                f'Model is missing module "{key}" that is present in the '
                'provided forward dictionary; check compatibility'
            )

def convert_from_pyg_model(model, forward_dictionary, n_node, node_dim,
                           n_edge, edge_dim, activate_final=None,
                           output_dir='my-hls-test', project_name='myproject',
                           part='xcku115-flvb2104-2-i', clock_period=5, io_type='io_parallel', hls_config=None):
    """

    Convert a Pytorch.Geometric model to an hls model.

    Parameters
    ----------
    model : Pytorch.geometric model object.
        Model to be converted to hls model object.
    n_node, n_edge: int, int
        These parameters define the size of the graphs that your hls GNN
        accepts as input. Inputs must be truncated or zero-padded to this
        size before feeding them to your model. This is necessary because
        each layer of the hls/hardware implementation has a fixed size
        and cannot be resized.
    node_dim, edge_dim: int, int
        node_dim defines the length of the vector used to represent each
        node in the graph-input. For example, if each node is represented
        as a 1x3 vector, node_dim=3.
        Likewise, edge_dim defines the length of the vector used to
        represent each edge in the graph-input.

    forward_dictionary: OrderedDict object of the form {string: string}
        Use this dictionary to define the order in which your model's
        forward() method calls on the model's submodules. The keys
        of the dictionary should be the names of your model's submodules, and the
        value stored in each key should indicate whether that submodule is an
        'EdgeBlock' (i.e. it predicts messages/edge-updates) or whether its a
        'NodeBlock' (i.e. it predicts node-updates).

        For example, consider this InteractionNetwork (https://github.com/GageDeZoort/interaction_network_paper/blob/pytorch_geometric/models/interaction_network.py),
        whose forward() method calls on its submodules in the following order:
        1. An EdgeBlock named 'R1'
        2. A NodeBlock named 'O'
        3. An EdgeBlock named 'R2'

        One would define its forward dictionary as such:
        >>> forward_dictionary = OrderedDict()
        >>> forward_dictionary['R1'] = 'EdgeBlock'
        >>> forward_dictionary['O'] = 'NodeBlock'
        >>> forward_dictionary['R2'] = 'EdgeBlock'

        It is really important to define the submodules in the same order with which the
        forward() method calls on them. hls4ml has no other way of inferring this order.

    activate_final: string, optional
        If the activation of the final output is not already a layer in the corresponding
        submodule, name the type of the activation function here. In the preceding example,
        one would pass the value 'sigmoid', because the final output of the model
        is the sigmoid-activated output of 'R2' (the last submodule called by the
        forward() method). In other words, the model returns torch.sigmoid(self.R2(m2)).
        Other accepted values for this parameter include:
        ['linear', 'relu', 'elu', 'selu', 'prelu', 'leaky_relu', 'softmax', 'tanh', 'softplus',
        'softsign', 'hard_sigmoid','thresholded_relu', 'binary_tanh', 'ternary_tanh']
    output_dir : string, optional
        Output directory to write hls codes.
    project_name : string, optional
        hls project name.
    part : string, optional
        The particular FPGA part number that you are considering.
    clock_period : int, optional
        The clock period, in ns, at which your algorithm runs.
    io_type : string, optional
        Your options are 'io_parallel' or 'io_serial' where this really
        defines if you are pipelining your algorithm or not.
    hls_config : dict, optional
        Additional configuration dictionary for hls model.

    Returns
    -------
    hls_model : hls4ml model object.

    See Also
    --------
    hls4ml.convert_from_pytorch_model, hls4ml.convert_from_keras_model,
    hls4ml.convert_from_onnx_model

    Example
    --------
    >>> import hls4ml
    >>> config = hls4ml.utils.config_from_pyg_model(model, granularity='model')
    >>>
    >>> forward_dictionary = OrderedDict()
    >>> forward_dictionary['R1'] = 'EdgeBlock'
    >>> forward_dictionary['O'] = 'NodeBlock'
    >>> forward_dictionary['R2'] = 'EdgeBlock'
    >>> graph_dimensions = {"n_node": 112, "node_dim": 3, "n_edge": 148, "edge_dim": 4}
    >>> hls_model = hls4ml.converters.convert_from_pyg_model(model, forward_dictionary,
                                                             **graph_dimensions,
                                                             activate_final='sigmoid',
                                                             hls_config=config)

    """
    # Validate the forward dictionary up front; previously this call sat
    # above the docstring, turning the docstring into a dead string literal.
    check_forward_dict(model, forward_dictionary)

    # Avoid the shared-mutable-default pitfall: hls_config defaults to a
    # fresh dict per call instead of a module-level {}.
    if hls_config is None:
        hls_config = {}

    config = create_config(
        output_dir=output_dir,
        project_name=project_name,
        part=part,
        clock_period=clock_period,
        io_type=io_type
    )

    config['PytorchModel'] = model
    # Fixed graph-input shapes; EdgeIndex carries (sender, receiver) pairs.
    config['InputShape'] = {
        'NodeAttr': [n_node, node_dim],
        'EdgeAttr': [n_edge, edge_dim],
        'EdgeIndex': [n_edge, 2]
    }
    config['ForwardDictionary'] = forward_dictionary
    config['ActivateFinal'] = activate_final

    model_config = hls_config.get('Model', None)
    config['HLSConfig']['Model'] = _check_model_config(model_config)

    _check_hls_config(config, hls_config)

    return pyg_to_hls(config)

def convert_from_onnx_model(model, output_dir='my-hls-test', project_name='myproject', input_data_tb=None,
output_data_tb=None, backend='Vivado', board=None, part=None, clock_period=5, io_type='io_parallel',
Expand Down
Empty file.
60 changes: 60 additions & 0 deletions hls4ml/converters/pyg/interaction_network_blocks.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,60 @@
import numpy as np
from hls4ml.converters.pyg_to_hls import pyg_handler

def parse_GraphBlock(block_name, config, n_node, n_edge, node_dim, edge_dim):
    """Build the layer dict shared by NodeBlock/EdgeBlock parsing.

    Inspects the torch submodule named *block_name* on config['PytorchModel']
    to count its Linear layers and read the final output width.

    Parameters
    ----------
    block_name : str
        Attribute name of the submodule on the pytorch model.
    config : dict
        Conversion config; 'PytorchModel' holds the torch model.
    n_node, n_edge, node_dim, edge_dim : int
        Fixed graph dimensions recorded in the layer dict.

    Returns
    -------
    dict
        Layer dict with graph dimensions plus 'n_layers' and 'out_dim'.

    Raises
    ------
    ValueError
        If the submodule contains no Linear layers (previously this was
        an unhelpful NameError on an unbound variable).
    """
    layer_dict = {
        "name": block_name,
        "n_node": n_node,
        "n_edge": n_edge,
        "node_dim": node_dim,
        "edge_dim": edge_dim,
    }

    # Submodules may wrap their layers in a .layers Sequential (see
    # RelationalModel/ObjectModel) or hold them directly.
    model = config['PytorchModel']
    torch_block = getattr(model, block_name)
    try:
        torch_layers = torch_block.layers._modules
    except AttributeError:
        torch_layers = torch_block._modules

    # Name-based check (rather than isinstance) keeps this module free of
    # a hard torch import.
    linear_layers = [l for l in torch_layers.values()
                     if l.__class__.__name__ == "Linear"]
    if not linear_layers:
        raise ValueError(
            f'Block "{block_name}" contains no Linear layers; cannot infer out_dim')
    layer_dict["n_layers"] = len(linear_layers)
    layer_dict["out_dim"] = linear_layers[-1].out_features
    return layer_dict

@pyg_handler('NodeBlock')
def parse_NodeBlock(block_name, config, update_dict, index, n_node, n_edge, node_dim, edge_dim):
    """Parse a NodeBlock: consumes the latest node features and aggregated
    edge messages, and records its output as the new node features."""
    out_name = f"layer{index}_out"
    layer_dict = parse_GraphBlock(block_name, config, n_node, n_edge, node_dim, edge_dim)
    layer_dict.update(
        class_name="NodeBlock",
        inputs=[update_dict["last_node_update"], update_dict["last_edge_aggr_update"]],
        outputs=[out_name],
    )
    update_dict["last_node_update"] = out_name
    return layer_dict, update_dict

@pyg_handler('EdgeBlock')
def parse_EdgeBlock(block_name, config, update_dict, index, n_node, n_edge, node_dim, edge_dim):
    """Parse an EdgeBlock: consumes the latest node and edge features plus
    the connectivity, and records its output as the new edge features."""
    out_name = f"layer{index}_out"
    layer_dict = parse_GraphBlock(block_name, config, n_node, n_edge, node_dim, edge_dim)
    layer_dict.update(
        class_name="EdgeBlock",
        inputs=[update_dict["last_node_update"], update_dict["last_edge_update"], "edge_index"],
        outputs=[out_name],
    )
    update_dict["last_edge_update"] = out_name
    return layer_dict, update_dict

@pyg_handler('EdgeAggregate')
def parse_EdgeAggregate(block_name, config, update_dict, index, n_node, n_edge, node_dim, edge_dim):
    """Parse an edge-aggregation stage: sums/collects the latest edge
    features per receiving node and records the result for NodeBlocks."""
    out_name = f"layer{index}_out"
    layer_dict = dict(
        name=f"aggr{index}",
        class_name="EdgeAggregate",
        n_node=n_node,
        n_edge=n_edge,
        node_dim=node_dim,
        edge_dim=edge_dim,
        # Aggregation preserves the edge-feature width.
        out_dim=edge_dim,
        inputs=[update_dict["last_edge_update"], "edge_index"],
        outputs=[out_name],
    )
    update_dict["last_edge_aggr_update"] = out_name
    return layer_dict, update_dict
Loading