Skip to content
This repository was archived by the owner on Feb 5, 2019. It is now read-only.

Commit 85ec369

Browse files
committed
[InstCombine] Fix negative GEP offset evaluation for 32-bit pointers
This fixes https://bugs.llvm.org/show_bug.cgi?id=39908. The evaluateGEPOffsetExpression() function simplifies GEP offsets for use in comparisons against zero, basically by converting X*Scale+Offset==0 to X+Offset/Scale==0 if Scale divides Offset. However, before this is done, Offset is masked down to the pointer size. This results in incorrect results for negative Offsets, because we end up dividing the 32-bit offset zero-extended (rather than sign-extended) to 64 bits. Fix this by explicitly sign-extending the truncated value. Differential Revision: https://reviews.llvm.org/D55449 git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@348987 91177308-0d34-0410-b5e6-96231b3b80d8
1 parent 47d569d commit 85ec369

File tree

2 files changed

+52
-5
lines changed

2 files changed

+52
-5
lines changed

lib/Transforms/InstCombine/InstCombineCompares.cpp

+3-5
Original file line number | Diff line number | Diff line change
@@ -522,11 +522,9 @@ static Value *evaluateGEPOffsetExpression(User *GEP, InstCombiner &IC,
522522
}
523523

524524
// Otherwise, there is an index. The computation we will do will be modulo
525-
// the pointer size, so get it.
526-
uint64_t PtrSizeMask = ~0ULL >> (64-IntPtrWidth);
527-
528-
Offset &= PtrSizeMask;
529-
VariableScale &= PtrSizeMask;
525+
// the pointer size.
526+
Offset = SignExtend64(Offset, IntPtrWidth);
527+
VariableScale = SignExtend64(VariableScale, IntPtrWidth);
530528

531529
// To do this transformation, any constant index must be a multiple of the
532530
// variable scale factor. For example, we can evaluate "12 + 4*i" as "3 + i",
+49
Original file line number | Diff line number | Diff line change
@@ -0,0 +1,49 @@
1+
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
2+
; RUN: opt < %s -instcombine -S | FileCheck %s
3+
4+
target datalayout = "p:32:32"
5+
6+
%S = type { [2 x i32] }
7+
8+
define i1 @test([0 x %S]* %p, i32 %n) {
9+
; CHECK-LABEL: @test(
10+
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[N:%.*]], 1
11+
; CHECK-NEXT: ret i1 [[CMP]]
12+
;
13+
%start.cast = bitcast [0 x %S]* %p to %S*
14+
%end = getelementptr inbounds [0 x %S], [0 x %S]* %p, i32 0, i32 %n, i32 0, i32 0
15+
%end.cast = bitcast i32* %end to %S*
16+
%last = getelementptr inbounds %S, %S* %end.cast, i32 -1
17+
%cmp = icmp eq %S* %last, %start.cast
18+
ret i1 %cmp
19+
}
20+
21+
; Same test using 64-bit indices.
22+
define i1 @test64([0 x %S]* %p, i64 %n) {
23+
; CHECK-LABEL: @test64(
24+
; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[N:%.*]] to i32
25+
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP1]], 1
26+
; CHECK-NEXT: ret i1 [[CMP]]
27+
;
28+
%start.cast = bitcast [0 x %S]* %p to %S*
29+
%end = getelementptr inbounds [0 x %S], [0 x %S]* %p, i64 0, i64 %n, i32 0, i64 0
30+
%end.cast = bitcast i32* %end to %S*
31+
%last = getelementptr inbounds %S, %S* %end.cast, i64 -1
32+
%cmp = icmp eq %S* %last, %start.cast
33+
ret i1 %cmp
34+
}
35+
36+
; Here the offset overflows and is treated modulo 2^32. This is UB.
37+
define i1 @test64_overflow([0 x %S]* %p, i64 %n) {
38+
; CHECK-LABEL: @test64_overflow(
39+
; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[N:%.*]] to i32
40+
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP1]], 1
41+
; CHECK-NEXT: ret i1 [[CMP]]
42+
;
43+
%start.cast = bitcast [0 x %S]* %p to %S*
44+
%end = getelementptr inbounds [0 x %S], [0 x %S]* %p, i64 0, i64 %n, i32 0, i64 8589934592
45+
%end.cast = bitcast i32* %end to %S*
46+
%last = getelementptr inbounds %S, %S* %end.cast, i64 -1
47+
%cmp = icmp eq %S* %last, %start.cast
48+
ret i1 %cmp
49+
}

0 commit comments

Comments
 (0)