Skip to content

Commit a25fbcd

Browse files
committed
[Migration][DO NOT MERGE] Fix linting
ghstack-source-id: c243fe3 Pull Request resolved: #1333
1 parent fb08955 commit a25fbcd

File tree

3 files changed

+18
-10
lines changed

3 files changed

+18
-10
lines changed

.lintrunner.toml

Lines changed: 13 additions & 0 deletions
Original file line number · Diff line number · Diff line change
@@ -46,6 +46,19 @@ exclude_patterns = [
4646
'tests/**', # Skip linting test files for speed
4747
'onnxscript/**/*_test.py', # Skip linting test files for speed
4848
'onnxscript/function_libs/torch_lib/ops/**', # Operators typing do not play well with mypy
49+
'onnxscript/optimizer/evaluator.py', # FIXME
50+
'onnxscript/optimizer/constant_folding.py', # FIXME
51+
'onnxscript/_legacy_ir/__init__.py', # FIXME
52+
'onnxscript/rewriter/onnxruntime/transformers/fastgelu.py', # FIXME
53+
'onnxscript/rewriter/onnxruntime/instance_to_group_normalization.py', # FIXME
54+
'onnxscript/rewriter/function_rule.py', # FIXME
55+
'onnxscript/_legacy_ir/irbuilder.py', # FIXME
56+
'onnxscript/optimizer/fold_constants_v0.py', # FIXME
57+
'onnxscript/rewriter/pattern.py', # FIXME
58+
'onnxscript/rewriter/onnxruntime/transformers/multihead_attention.py', # FIXME
59+
'onnxscript/tools/function_unittest_producer.py', # FIXME
60+
'onnxscript/_legacy_ir/visitor.py', # FIXME
61+
'onnxscript/rewriter/onnxruntime/transformers/layernorm.py', # FIXME
4962
]
5063
command = [
5164
'python',

onnxscript/rewriter/onnxruntime/softmax.py

Lines changed: 4 additions & 2 deletions
Original file line number · Diff line number · Diff line change
@@ -14,7 +14,7 @@
1414

1515
def softmax_with_fp32_upcast(input, axis):
1616
upcast = op.Cast(input, to=onnx.TensorProto.FLOAT)
17-
softmax = op.Softmax(upcast, axis=axis)
17+
softmax = op.Softmax(upcast, axis=axis) # pylint: disable=redefined-outer-name
1818
return op.Cast(softmax, to=onnx.TensorProto.FLOAT16)
1919

2020

@@ -24,7 +24,7 @@ def softmax(input, axis):
2424

2525
def softmax_with_fp32_upcast_without_axis(input):
2626
upcast = op.Cast(input, to=onnx.TensorProto.FLOAT)
27-
softmax = op.Softmax(upcast)
27+
softmax = op.Softmax(upcast) # pylint: disable=redefined-outer-name
2828
return op.Cast(softmax, to=onnx.TensorProto.FLOAT16)
2929

3030

@@ -43,13 +43,15 @@ def check_if_fp16_input(match_bindings: dict[str, ir.Value | Any]) -> bool:
4343
return input_val.element_type == onnx.TensorProto.FLOAT16
4444

4545

46+
# pylint: disable=pointless-string-statement
4647
"""
4748
This is an onnxruntime specific pattern. Softmax upcast is a common
4849
pattern observed in transformers models to prevent overflow. However
4950
this is not required since onnxruntime implementation already takes
5051
overflow into account. Hence it is safe to remove the surrounding casts
5152
to free up memory as well as saving performance.
5253
"""
54+
# pylint: enable=pointless-string-statement
5355
rules = pattern.RewriteRuleSet(
5456
[
5557
pattern.RewriteRule(softmax_with_fp32_upcast, softmax, check_if_fp16_input),

onnxscript/rewriter/pattern_test.py

Lines changed: 1 addition & 8 deletions
Original file line number · Diff line number · Diff line change
@@ -210,8 +210,6 @@ def test_concat_in_function(self):
210210

211211
class RewriteRuleTest(unittest.TestCase):
212212
def test_commute(self):
213-
op = pattern.onnxop
214-
215213
def add_0(x):
216214
return x + 0
217215

@@ -239,8 +237,6 @@ def identity(x):
239237
self.assertEqual(nodes[1].op_type, "Identity")
240238

241239
def test_const_value(self):
242-
op = pattern.onnxop
243-
244240
def reshape(x, newshape):
245241
return op.Reshape(x, newshape)
246242

@@ -259,10 +255,7 @@ def _check_for_redundant_reshape(x, newshape):
259255

260256
if len(oldshape) != len(newshape):
261257
return False
262-
for d1, d2 in zip(oldshape, newshape):
263-
if d1 != d2 and d2 != -1: # noqa: PLR1714
264-
return False
265-
return True
258+
return all(not (d1 != d2 and d2 != -1) for d1, d2 in zip(oldshape, newshape)) # pylint: disable=consider-using-in
266259

267260
def check_for_redundant_reshape(bindings):
268261
return _check_for_redundant_reshape(**bindings)

0 commit comments

Comments (0)