author    | Evan Cheng <evan.cheng@apple.com> | 2012-07-12 01:45:35 +0000
committer | Evan Cheng <evan.cheng@apple.com> | 2012-07-12 01:45:35 +0000
commit    | 79590b8edffd403d93c764887a4f0ad4f2612914 (patch)
tree      | 369b97c645dc9744eb827927a9ecd66ed3b10a66 /test/Transforms
parent    | e96ce46b4deabec1110fd9d58213cb884a33b61c (diff)
Instcombine was transforming:
%shr = lshr i64 %key, 3
%0 = load i64* %val, align 8
%sub = add i64 %0, -1
%and = and i64 %sub, %shr
ret i64 %and
to:
%shr = lshr i64 %key, 3
%0 = load i64* %val, align 8
%sub = add i64 %0, 2305843009213693951
%and = and i64 %sub, %shr
ret i64 %and
The demanded-bits optimization is actually a pessimization here: because %shr comes from an lshr by 3, only the low 61 bits of %sub are demanded, so the -1 gets masked down to 2305843009213693951 (2^61 - 1), a constant that has to be materialized, whereas add -1 would have been codegen'ed as a simple sub 1. Teach the demanded constant shrinking optimization to also check the negated constant, to make sure shrinking actually reduces the width of the constant.
rdar://11793464
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@160101 91177308-0d34-0410-b5e6-96231b3b80d8
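To make the intent of the change concrete, here is a minimal, self-contained C++ sketch of the profitability check described above. This is an illustration written for this note, not the actual InstCombine patch: the names worthShrinking and minSignedBits are invented, and the real code operates on APInt values inside the demanded-bits simplification rather than on plain int64_t.

#include <algorithm>
#include <cstdint>
#include <cstdio>

// Bits needed to hold v as a signed two's-complement value, in the spirit of
// APInt::getMinSignedBits(): 1 for 0 and -1, 2 for 1 and -2, and so on.
static unsigned minSignedBits(int64_t v) {
  uint64_t u = static_cast<uint64_t>(v < 0 ? ~v : v); // magnitude without sign
  unsigned bits = 1;                                  // the sign bit
  while (u) { ++bits; u >>= 1; }
  return bits;
}

// Should the constant C in "add x, C" be replaced by (C & Demanded)?
// Only if the masked value is genuinely narrower than both C and -C; an add
// of -1 is really a "sub 1" at codegen time, so -1 counts as a 1-bit constant.
static bool worthShrinking(int64_t C, uint64_t Demanded) {
  int64_t Shrunk = static_cast<int64_t>(static_cast<uint64_t>(C) & Demanded);
  if (Shrunk == C)
    return false;                                     // nothing to shrink
  unsigned origBits = std::min(minSignedBits(C), minSignedBits(-C));
  return minSignedBits(Shrunk) < origBits;
}

int main() {
  uint64_t lowBits = (1ULL << 61) - 1; // demanded bits implied by lshr i64 %key, 3
  // The case from this commit: masking -1 to 61 bits would widen, not shrink.
  printf("shrink -1 under 61-bit mask: %s\n",
         worthShrinking(-1, lowBits) ? "yes" : "no");  // prints "no"
  // A genuinely profitable case: only the low byte is demanded.
  printf("shrink 0x1100 under 0xFF:    %s\n",
         worthShrinking(0x1100, 0xFF) ? "yes" : "no"); // prints "yes"
  return 0;
}

Under these assumptions, the first query answers "no", which matches the regression test below: after the fix, instcombine leaves the add of -1 alone.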
Diffstat (limited to 'test/Transforms')
-rw-r--r-- | test/Transforms/InstCombine/2012-07-11-AddSubDemandedBits.ll | 18 |
1 files changed, 18 insertions, 0 deletions
diff --git a/test/Transforms/InstCombine/2012-07-11-AddSubDemandedBits.ll b/test/Transforms/InstCombine/2012-07-11-AddSubDemandedBits.ll
new file mode 100644
index 0000000..d62a886
--- /dev/null
+++ b/test/Transforms/InstCombine/2012-07-11-AddSubDemandedBits.ll
@@ -0,0 +1,18 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+; When shrinking the demanded constant operand of an add instruction, keep in
+; mind the opcode can be changed to sub and the constant negated. Make sure
+; shrinking the constant would actually reduce the width.
+; rdar://11793464
+
+define i64 @t(i64 %key, i64* %val) nounwind {
+entry:
+; CHECK: @t
+; CHECK-NOT: add i64 %0, 2305843009213693951
+; CHECK: add i64 %0, -1
+  %shr = lshr i64 %key, 3
+  %0 = load i64* %val, align 8
+  %sub = sub i64 %0, 1
+  %and = and i64 %sub, %shr
+  ret i64 %and
+}