@@ -38,7 +38,7 @@ bie_path = km.analysis(
     input_mapping,
     output_dir = "/data1/kneron_flow",
     datapath_bitwidth_mode = "int8",
-    weightpath_bitwidth_mode = "int8",
+    weight_bitwidth_mode = "int8",
     model_in_bitwidth_mode = "int8",
     model_out_bitwidth_mode = "int8",
     cpu_node_bitwidth_mode = "int8"
@@ -57,7 +57,7 @@ bie_path = km.analysis(
     input_mapping,
     output_dir = "/data1/kneron_flow",
     datapath_bitwidth_mode = "mix light",
-    weightpath_bitwidth_mode = "mix light",
+    weight_bitwidth_mode = "mix light",
     model_in_bitwidth_mode = "int16",
     model_out_bitwidth_mode = "int16",
     cpu_node_bitwidth_mode = "int16"
@@ -75,7 +75,7 @@ bie_path = km.analysis(
     input_mapping,
     output_dir = "/data1/kneron_flow",
     datapath_bitwidth_mode = "mixbw",
-    weightpath_bitwidth_mode = "mixbw",
+    weight_bitwidth_mode = "mixbw",
     model_in_bitwidth_mode = "int16",
     model_out_bitwidth_mode = "int16",
     cpu_node_bitwidth_mode = "int16",
@@ -100,7 +100,7 @@ bie_path = km.analysis(
     input_mapping,
     output_dir = "/data1/kneron_flow",
     datapath_bitwidth_mode = "int16",
-    weightpath_bitwidth_mode = "int16",
+    weight_bitwidth_mode = "int16",
     model_in_bitwidth_mode = "int16",
     model_out_bitwidth_mode = "int16",
     cpu_node_bitwidth_mode = "int16"
@@ -143,4 +143,4 @@ export MIXBW_DEBUG=True
 
 
 
-By following this workflow, developers can systematically optimize models for deployment on Kneron NPUs while balancing accuracy and performance.
+By following this workflow, developers can systematically optimize models for deployment on Kneron NPUs while balancing accuracy and performance.
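For context, a minimal sketch of a quantization analysis call using the renamed weight_bitwidth_mode keyword is shown below. Only the analysis() keyword names and values come from the diff above; the ktc.ModelConfig arguments, input name, and preprocessing are hypothetical placeholders based on the usual Kneron toolchain flow.

```python
# Sketch, assuming the Kneron toolchain docker where the ktc Python API is available.
import numpy as np
import ktc

# Hypothetical model registration -- id, version, platform, and ONNX path are examples.
km = ktc.ModelConfig(32769, "0001", "720", onnx_path="/data1/model.opt.onnx")

# input_mapping maps each model input name to a list of preprocessed numpy arrays
# used as quantization calibration data (a random placeholder here).
input_mapping = {"input": [np.random.rand(1, 3, 224, 224).astype(np.float32)]}

# Full int16 quantization, using the renamed weight_bitwidth_mode argument.
bie_path = km.analysis(
    input_mapping,
    output_dir="/data1/kneron_flow",
    datapath_bitwidth_mode="int16",
    weight_bitwidth_mode="int16",
    model_in_bitwidth_mode="int16",
    model_out_bitwidth_mode="int16",
    cpu_node_bitwidth_mode="int16",
)
print("quantized model written to", bie_path)
```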