Skip to content

Commit 7f76ab4

Browse files
committed
DOC fix --amend --signoff
Signed-off-by: V-E-D <[email protected]>
1 parent 34333f4 commit 7f76ab4

File tree

1 file changed

+3
-3
lines changed

1 file changed

+3
-3
lines changed

neural_compressor/adaptor/pytorch.py

Lines changed: 3 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -4174,9 +4174,9 @@ def _get_module_scale_zeropoint(self, model, tune_cfg, prefix=""):
41744174
# Improved scale detection logic
41754175
if "scale" in node.target and not any(exclude in node.target for exclude in ["layer_scale", "gamma"]):
41764176
try:
4177-
tune_cfg["get_attr"][sub_name] = float(getattr(model, node.target))
4178-
except ValueError:
4179-
logger.warning(f"Could not convert {node.target} to float, skipping...")
4177+
tune_cfg["get_attr"][sub_name] = getattr(model, node.target).tolist()
4178+
except Exception as e:
4179+
logger.warning(f"Could not convert {node.target} to list, skipping... Error: {str(e)}")
41804180
elif "zero_point" in node.target:
41814181
tune_cfg["get_attr"][sub_name] = int(getattr(model, node.target))
41824182
else:

0 commit comments

Comments (0)