"""Convert a trained PyTorch CNN to an HLS project with hls4ml and synthesize it."""
import hls4ml
import torch
import sys

# Project-local modules live under 01_Library.
sys.path.append('./01_Library/')
from Model import *
# NOTE(review): 'tariner' looks like a typo of 'trainer' — confirm the actual
# module filename on disk before renaming.
from tariner import *

# Pretrained state dict, CPU-mapped so no GPU is required to convert.
STATE_DICT_PATH = './model/CNN_hw_230106_det1_state_dict_common_model/CNN_5_acc_96.15.pth'

net = CNN_hw()
net.load_state_dict(torch.load(STATE_DICT_PATH, map_location='cpu'))
net.eval()

# Model-level hls4ml configuration.
# inputs_channel_last=False: inputs are transposed to channel-last internally,
#   since PyTorch tensors are channel-first.
# transpose_outputs=True: outputs are transposed back automatically; if False,
#   the outputs would need to be transposed manually.
# NOTE(review): default_precision='ap_int<8>' is a pure integer type; hls4ml
#   examples typically use ap_fixed<W,I> — confirm this is intentional.
hls_config = hls4ml.utils.config.config_from_pytorch_model(
    net,
    granularity='model',
    backend='Vivado',
    default_precision='ap_int<8>',
    default_reuse_factor=64,
    inputs_channel_last=False,
    transpose_outputs=True,
)

# Input tensor shape: [batch, channel, height, width] (channel-first, as in
# PyTorch). The batch dimension must be None.
tensor_shapes = [[None, 1, 8, 8]]

project = hls4ml.converters.convert_from_pytorch_model(
    net,
    tensor_shapes,
    output_dir='my-hls-test',
    project_name='myproject',
    input_data_tb=None,
    output_data_tb=None,
    backend='Vivado',
    hls_config=hls_config,
    part='xc7k325tffg900-2',
    clock_period=5,
)

# Synthesize the model (C simulation skipped).
project.build(csim=False)

# Print the Vivado synthesis report for the generated project.
hls4ml.report.read_vivado_report('my-hls-test/')