author     Evan Cheng <evan.cheng@apple.com>    2012-07-16 19:35:43 +0000
committer  Evan Cheng <evan.cheng@apple.com>    2012-07-16 19:35:43 +0000
commit     98819c9d1e3b929e9ebab0e8cd3edb31aad21bd8 (patch)
tree       382bd7339c243a3f753335d08a9d040a054aab37 /lib
parent     c76fa8937d483ae83f94d0793254dbab78877687 (diff)
For something like this:

uint32_t hi(uint64_t res)
{
  uint32_t hi = res >> 32;
  return !hi;
}

the llvm IR looks like this:

define i32 @hi(i64 %res) nounwind uwtable ssp {
entry:
  %lnot = icmp ult i64 %res, 4294967296
  %lnot.ext = zext i1 %lnot to i32
  ret i32 %lnot.ext
}

The optimizer has optimized away the right shift and truncate, but the
resulting constant is too large to fit in the 32-bit immediate field. The
resulting x86 code is worse as a result:

        movabsq $4294967296, %rax       ## imm = 0x100000000
        cmpq    %rax, %rdi
        sbbl    %eax, %eax
        andl    $1, %eax

This patch teaches the x86 lowering code to handle ult against a large
immediate with trailing zeros. It issues a right shift and a truncate,
followed by a comparison against the shifted immediate:

        shrq    $32, %rdi
        testl   %edi, %edi
        sete    %al
        movzbl  %al, %eax

It also handles a ugt comparison against a large immediate with its trailing
bits set, i.e. X > 0x0ffffffff -> (X >> 32) >= 1.

rdar://11866926

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@160312 91177308-0d34-0410-b5e6-96231b3b80d8
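To make the ult rewrite concrete, here is a minimal standalone C++ sketch
(not part of the patch; the helper name ultViaShift and the test constants
are invented for illustration) of the identity the lowering relies on: for a
constant K with tz >= 32 trailing zeros, X < K holds exactly when the shifted,
truncated values compare the same way in 32 bits, and the special case
K == 1 << tz folds to an equality test against zero, matching the COND_E path
in the diff below.

#include <cassert>
#include <cstdint>
#include <initializer_list>

// Hypothetical helper: evaluate X < K, where K has tz >= 32 trailing zeros,
// using only a right shift, a truncate to 32 bits, and a 32-bit compare.
static bool ultViaShift(uint64_t X, uint64_t K, unsigned tz) {
  uint32_t lhs = static_cast<uint32_t>(X >> tz);
  uint32_t rhs = static_cast<uint32_t>(K >> tz);
  if (rhs == 1)        // X < 0x100000000 -> (X >> 32) == 0  (COND_E)
    return lhs == 0;
  return lhs < rhs;    // X < 0x300000000 -> (X >> 32) < 3   (COND_B)
}

int main() {
  for (uint64_t X : {0ull, 1ull, 0xffffffffull, 0x100000000ull,
                     0x1ffffffffull, 0x2ffffffffull, 0x300000000ull}) {
    assert(ultViaShift(X, 0x100000000ull, 32) == (X < 0x100000000ull));
    assert(ultViaShift(X, 0x200000000ull, 33) == (X < 0x200000000ull));
    assert(ultViaShift(X, 0x300000000ull, 32) == (X < 0x300000000ull));
  }
  return 0;
}

The constants exercised here are the same three examples that appear in the
comments of the added code.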
Diffstat (limited to 'lib')
-rw-r--r--   lib/Target/X86/X86ISelLowering.cpp   44
1 file changed, 44 insertions, 0 deletions
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index c55a1ef..1d72aad 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -3059,6 +3059,50 @@ static unsigned TranslateX86CC(ISD::CondCode SetCCOpcode, bool isFP,
RHS = DAG.getConstant(0, RHS.getValueType());
return X86::COND_LE;
}
+ if (SetCCOpcode == ISD::SETULT || SetCCOpcode == ISD::SETUGE) {
+ unsigned TrailZeros = RHSC->getAPIntValue().countTrailingZeros();
+ if (TrailZeros >= 32) {
+ // The constant doesn't fit in cmp immediate field. Right shift LHS by
+ // the # of trailing zeros and truncate it to 32-bit. Then compare
+ // against shifted RHS.
+ assert(LHS.getValueType() == MVT::i64 && "Expecting a 64-bit cmp!");
+ DebugLoc dl = LHS.getDebugLoc();
+ LHS = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
+ DAG.getNode(ISD::SRL, dl, MVT::i64, LHS,
+ DAG.getConstant(TrailZeros, MVT::i8)));
+ uint64_t C = RHSC->getZExtValue() >> TrailZeros;
+
+ if (SetCCOpcode == ISD::SETULT) {
+ // X < 0x300000000 -> (X >> 32) < 3
+ // X < 0x100000000 -> (X >> 32) == 0
+ // X < 0x200000000 -> (X >> 33) == 0
+ if (C == 1) {
+ RHS = DAG.getConstant(0, MVT::i32);
+ return X86::COND_E;
+ }
+ RHS = DAG.getConstant(C, MVT::i32);
+ return X86::COND_B;
+ } else /* SetCCOpcode == ISD::SETUGE */ {
+ // X >= 0x100000000 -> (X >> 32) >= 1
+ RHS = DAG.getConstant(C, MVT::i32);
+ return X86::COND_AE;
+ }
+ }
+ }
+ if (SetCCOpcode == ISD::SETUGT) {
+ unsigned TrailOnes = RHSC->getAPIntValue().countTrailingOnes();
+ if (TrailOnes >= 32 && !RHSC->isAllOnesValue()) {
+ assert(LHS.getValueType() == MVT::i64 && "Expecting a 64-bit cmp!");
+ DebugLoc dl = LHS.getDebugLoc();
+ LHS = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
+ DAG.getNode(ISD::SRL, dl, MVT::i64, LHS,
+ DAG.getConstant(TrailOnes, MVT::i8)));
+ uint64_t C = (RHSC->getZExtValue()+1) >> TrailOnes;
+ // X > 0x0ffffffff -> (X >> 32) >= 1
+ RHS = DAG.getConstant(C, MVT::i32);
+ return X86::COND_AE;
+ }
+ }
}
switch (SetCCOpcode) {
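For the SETUGT case added above, the corresponding identity is that X > M,
for an M whose low 32 or more bits are all ones (and which is not all ones),
is the same as X >= M + 1; since M + 1 then has at least 32 trailing zeros,
the check again reduces to a 32-bit compare of the shifted value. A minimal
standalone sketch (again not part of the patch; the helper name and constants
are illustrative only):

#include <cassert>
#include <cstdint>
#include <initializer_list>

// Hypothetical helper: evaluate X > M, where M has to >= 32 trailing ones and
// is not all ones, via the unsigned greater-or-equal form the patch emits.
static bool ugtViaShift(uint64_t X, uint64_t M, unsigned to) {
  uint32_t lhs = static_cast<uint32_t>(X >> to);
  uint32_t rhs = static_cast<uint32_t>((M + 1) >> to);
  return lhs >= rhs;   // X > 0x0ffffffff -> (X >> 32) >= 1  (COND_AE)
}

int main() {
  for (uint64_t X : {0ull, 0xffffffffull, 0x100000000ull,
                     0x1ffffffffull, 0x200000000ull}) {
    assert(ugtViaShift(X, 0x0ffffffffull, 32) == (X > 0x0ffffffffull));
    assert(ugtViaShift(X, 0x1ffffffffull, 33) == (X > 0x1ffffffffull));
  }
  return 0;
}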