22 changes: 18 additions & 4 deletions convert.py
@@ -90,12 +90,26 @@ def convert(args):
    for block in inference_program.blocks:
        for op in block.ops:
            if op.type in ops.node_maker:
+                op_attrs = dict([(name, op.attr(name))
+                                 for name in op.attr_names
+                                 ]) if op.attr_names is not None else None
+                op_inputs = dict([(name, op.input(name))
+                                  for name in op.input_names])
+                op_outputs = dict([(name, op.output(name))
+                                   for name in op.output_names])
+
+                # Append some customized arguments of the node maker here
+                if op.type == 'conv2d':
+                    kernel_shape = fluid.executor.fetch_var(
+                        op.input('Filter')[0].decode('string_escape'),
+                        inference_scope).shape
+                    op_attrs['kernel_shape'] = kernel_shape

Collaborator (on the fetch_var call): Since this is all static, we could make fetch_var available inside the ops module, so we don't have to use such a conditional here.

Author: Agreed. We should keep convert.py clean, so I have changed it to pass the operator and scope as arguments (a sketch of that shape follows the convert.py diff below).

                # TODO(kuke): deal with the corner case that vars in
                # different blocks have the same name
-                node_proto = ops.node_maker[op.type](
-                    inputs=op.input_arg_names,
-                    attrs=op.attr_names,
-                    outputs=op.output_arg_names)
+                node_proto = ops.node_maker[op.type](inputs=op_inputs,
+                                                     attrs=op_attrs,
+                                                     outputs=op_outputs)

                onnx_nodes.append(node_proto)
            else:
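Purely as an illustration of the author's follow-up above (passing the operator and its scope into the ops module so the conv2d special case can move out of convert.py), a node maker along these lines is one possibility. The conv2d_node_maker name and its (operator, scope) signature are assumptions, not code from this PR; make_node is taken from onnx.helper, matching how fluid_onnx/ops.py uses it.

import paddle.fluid as fluid
from onnx.helper import make_node


def conv2d_node_maker(operator, scope):
    # Recover the kernel shape from the filter variable held in the scope,
    # instead of having convert.py inject it as an extra attribute.
    filter_name = operator.input('Filter')[0].decode('string_escape')
    kernel_shape = fluid.executor.fetch_var(filter_name, scope).shape

    # Build the ONNX Conv node from the operator's own inputs/outputs/attrs.
    return make_node(
        'Conv',
        inputs=operator.input('Input') + operator.input('Filter'),
        outputs=operator.output('Output'),
        dilations=operator.attr('dilations'),
        kernel_shape=kernel_shape[2:],
        strides=operator.attr('strides'),
        group=operator.attr('groups'),
        pads=operator.attr('paddings'))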
77 changes: 57 additions & 20 deletions fluid_onnx/ops.py
@@ -56,7 +56,12 @@ def abs_op():


def add_op(inputs, attrs, outputs):
-    return make_node('Add', inputs=inputs, outputs=outputs, broadcast=1)
+    return make_node(
+        'Add',
+        inputs=inputs['X'] + inputs['Y'],
+        outputs=outputs['Out'],
+        axis=attrs['axis'],
+        broadcast=1)


def and_op():
@@ -81,8 +86,16 @@ def averagepool_op():
    pass


-def batchnorm_op():
-    pass
+def batchnorm_op(inputs, attrs, outputs):
+    bn_op = make_node(
+        'BatchNormalization',
+        inputs=inputs['X'] + inputs['Scale'] + inputs['Bias'] + inputs['Mean'] +
+        inputs['Variance'],
+        outputs=outputs['Y'],
+        is_test=attrs['is_test'],
+        epsilon=attrs['epsilon'],
+        momentum=attrs['momentum'])
+    return bn_op


def cast_op():
@@ -105,11 +118,17 @@ def constant_op():
    pass


-def conv_op():
-    """
-    Need to support broadcast.
-    """
-    pass
+def conv_op(inputs, attrs, outputs):
+    conv2d = make_node(
+        'Conv',
+        inputs=inputs['Input'] + inputs['Filter'],
+        outputs=outputs['Output'],
+        dilations=attrs['dilations'],
+        kernel_shape=attrs['kernel_shape'][2:],
+        strides=attrs['strides'],
+        group=attrs['groups'],
+        pads=attrs['paddings'])
+    return conv2d


def convtranspose_op():
@@ -225,7 +244,8 @@ def lppool_op():


def matmul_op(inputs, attrs, outputs):
-    return make_node('MatMul', inputs=inputs, outputs=outputs)
+    return make_node(
+        'MatMul', inputs=inputs['X'] + inputs['Y'], outputs=outputs['Out'])


def max_op():
@@ -281,6 +301,25 @@ def pad_op():
    pass


+def pool2d_op(inputs, attrs, outputs):
+    if attrs['global_pooling'] is False:
+        op_type = {'max': 'MaxPool', 'ave': 'AveragePool'}
+        pool2d = make_node(
+            op_type[attrs['pooling_type']],
+            inputs=inputs['X'],
+            outputs=outputs['Out'],
+            kernel_shape=attrs['ksize'],
+            strides=attrs['strides'],
+            pads=attrs['paddings'] + attrs['paddings'], )
+    else:
+        op_type = {'max': 'GlobalMaxPool', 'ave': 'GlobalAveragePool'}
+        pool2d = make_node(
+            op_type[attrs['pooling_type']],
+            inputs=inputs['X'],
+            outputs=outputs['Out'])
+    return pool2d

Collaborator (on the global_pooling check): 👍


def pow_op():
    pass

@@ -349,8 +388,8 @@ def reducesumsquare_op():
    pass


-def relu_op():
-    pass
+def relu_op(inputs, attrs, outputs):
+    return make_node('Relu', inputs=inputs['X'], outputs=outputs['Out'])


def reshape_op():
@@ -377,8 +416,8 @@ def slice_op():
    pass


-def softmax_op():
-    pass
+def softmax_op(inputs, attrs, outputs):
+    return make_node('Softmax', inputs=inputs['X'], outputs=outputs['Out'])


def softplus_op():
@@ -444,9 +483,6 @@ def xor_op():
# Reference for paddle operator availability taken from:
# https://github.com/PaddlePaddle/Paddle/issues/8028

-# ONNX Ops that use multiple Paddle ops are keyed by '<op1>,<op2>' fed into the
-# modifier.

node_maker = {
    # Paddle op name : (ONNX op name, modifier)
    'abs': ('Abs', abs_op),
@@ -456,13 +492,13 @@ def xor_op():
    # 'ArgMax', NEEDS ATTENTION.
    # 'ArgMin', NEEDS ATTENTION.
    '': ('AveragePool', averagepool_op),
-    'batch_norm': ('BatchNormalization', batchnorm_op),
+    'batch_norm': batchnorm_op,
    'cast': ('Cast', cast_op),
    # 'Ceil', NEEDS ATTENTION.
    'cast': ('Clip', clip_op),
    'concat': ('Concat', concat_op),
    ',': ('Constant', constant_op),
-    'conv': ('Conv', conv_op),
+    'conv2d': conv_op,

    # Need to continue the mapping below.
    '': 'ConvTranspose',
@@ -504,6 +540,7 @@ def xor_op():
    '': 'Or',
    '': 'PRelu',
    '': 'Pad',
+    'pool2d': pool2d_op,
    '': 'Pow',
    ',': 'RNN',
    '': 'RandomNormal',
@@ -521,14 +558,14 @@ def xor_op():
    # 'ReduceProd', NEEDS ATTENTION.
    '': 'ReduceSum',
    ',': 'ReduceSumSquare',
-    '': 'Relu',
+    'relu': relu_op,
    '': 'Reshape',
    # 'Selu', NEEDS ATTENTION.
    '': 'Shape',
    '': 'Sigmoid',
    '': 'Size',
    # 'Slice', NEEDS ATTENTION.
-    '': 'Softmax',
+    'softmax': softmax_op,
    '': 'Softplus',
    '': 'Softsign',
    '': 'SpaceToDepth',