author     Chris Lattner <sabre@nondot.org>  2008-04-20 18:24:14 +0000
committer  Chris Lattner <sabre@nondot.org>  2008-04-20 18:24:14 +0000
commit     0e4b6c7551776a801fa3caf9e05f47b5c8e001dc (patch)
tree       67b9bc7aab1bc06c45533100caec4cb921d937c4 /lib/VMCore/ConstantFold.cpp
parent     9dd2ce46c58dd05f0835df77f308396715890d66 (diff)
rearrange some code, simplify handling of shifts.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@49995 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'lib/VMCore/ConstantFold.cpp')
-rw-r--r--  lib/VMCore/ConstantFold.cpp  |  116
1 file changed, 56 insertions(+), 60 deletions(-)
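
The shift simplification in the diff below drops the separate "zero shift is identity" case: shifting an APInt by zero returns the original bits, so ConstantInt::get re-uniques to the same constant, and only an out-of-range shift amount needs special handling (it folds to undef). A minimal sanity-check sketch, illustrative only and not part of the patch, assuming the APInt API of this era (shl/lshr/ashr taking a uint32_t shift amount):

#include "llvm/ADT/APInt.h"
#include <cassert>
using namespace llvm;

// Shifting by zero is already the identity at the APInt level, which is
// why the folded shift code no longer needs an explicit zero-shift case.
static void zeroShiftIsIdentity() {
  APInt V(32, 42);
  assert(V.shl(0) == V);   // Shl by 0 leaves the bits unchanged
  assert(V.lshr(0) == V);  // LShr by 0 likewise
  assert(V.ashr(0) == V);  // AShr by 0 likewise
}

An oversized shift amount (at or above the bit width) is still handled separately and folded to undef, matching the guard kept in the patched code.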
diff --git a/lib/VMCore/ConstantFold.cpp b/lib/VMCore/ConstantFold.cpp
index ffc8f02..50fbe1a 100644
--- a/lib/VMCore/ConstantFold.cpp
+++ b/lib/VMCore/ConstantFold.cpp
@@ -599,44 +599,12 @@ Constant *llvm::ConstantFoldBinaryInstruction(unsigned Opcode,
}
}
- if (isa<ConstantExpr>(C1)) {
- // There are many possible foldings we could do here. We should probably
- // at least fold add of a pointer with an integer into the appropriate
- // getelementptr. This will improve alias analysis a bit.
- } else if (isa<ConstantExpr>(C2)) {
- // If C2 is a constant expr and C1 isn't, flop them around and fold the
- // other way if possible.
- switch (Opcode) {
- case Instruction::Add:
- case Instruction::Mul:
- case Instruction::And:
- case Instruction::Or:
- case Instruction::Xor:
- // No change of opcode required.
- return ConstantFoldBinaryInstruction(Opcode, C2, C1);
-
- case Instruction::Shl:
- case Instruction::LShr:
- case Instruction::AShr:
- case Instruction::Sub:
- case Instruction::SDiv:
- case Instruction::UDiv:
- case Instruction::FDiv:
- case Instruction::URem:
- case Instruction::SRem:
- case Instruction::FRem:
- default: // These instructions cannot be flopped around.
- return 0;
- }
- }
-
- // At this point we know neither constant is an UndefValue nor a ConstantExpr
- // so look at directly computing the value.
+ // At this point we know neither constant is an UndefValue.
if (const ConstantInt *CI1 = dyn_cast<ConstantInt>(C1)) {
if (const ConstantInt *CI2 = dyn_cast<ConstantInt>(C2)) {
using namespace APIntOps;
- APInt C1V = CI1->getValue();
- APInt C2V = CI2->getValue();
+ const APInt &C1V = CI1->getValue();
+ const APInt &C2V = CI2->getValue();
switch (Opcode) {
default:
break;
@@ -672,30 +640,27 @@ Constant *llvm::ConstantFoldBinaryInstruction(unsigned Opcode,
return ConstantInt::get(C1V | C2V);
case Instruction::Xor:
return ConstantInt::get(C1V ^ C2V);
- case Instruction::Shl:
- if (uint32_t shiftAmt = C2V.getZExtValue()) {
- if (shiftAmt < C1V.getBitWidth())
- return ConstantInt::get(C1V.shl(shiftAmt));
- else
- return UndefValue::get(C1->getType()); // too big shift is undef
- }
- return const_cast<ConstantInt*>(CI1); // Zero shift is identity
- case Instruction::LShr:
- if (uint32_t shiftAmt = C2V.getZExtValue()) {
- if (shiftAmt < C1V.getBitWidth())
- return ConstantInt::get(C1V.lshr(shiftAmt));
- else
- return UndefValue::get(C1->getType()); // too big shift is undef
- }
- return const_cast<ConstantInt*>(CI1); // Zero shift is identity
- case Instruction::AShr:
- if (uint32_t shiftAmt = C2V.getZExtValue()) {
- if (shiftAmt < C1V.getBitWidth())
- return ConstantInt::get(C1V.ashr(shiftAmt));
- else
- return UndefValue::get(C1->getType()); // too big shift is undef
- }
- return const_cast<ConstantInt*>(CI1); // Zero shift is identity
+ case Instruction::Shl: {
+ uint32_t shiftAmt = C2V.getZExtValue();
+ if (shiftAmt < C1V.getBitWidth())
+ return ConstantInt::get(C1V.shl(shiftAmt));
+ else
+ return UndefValue::get(C1->getType()); // too big shift is undef
+ }
+ case Instruction::LShr: {
+ uint32_t shiftAmt = C2V.getZExtValue();
+ if (shiftAmt < C1V.getBitWidth())
+ return ConstantInt::get(C1V.lshr(shiftAmt));
+ else
+ return UndefValue::get(C1->getType()); // too big shift is undef
+ }
+ case Instruction::AShr: {
+ uint32_t shiftAmt = C2V.getZExtValue();
+ if (shiftAmt < C1V.getBitWidth())
+ return ConstantInt::get(C1V.ashr(shiftAmt));
+ else
+ return UndefValue::get(C1->getType()); // too big shift is undef
+ }
}
}
} else if (const ConstantFP *CFP1 = dyn_cast<ConstantFP>(C1)) {
@@ -769,7 +734,38 @@ Constant *llvm::ConstantFoldBinaryInstruction(unsigned Opcode,
}
}
- // We don't know how to fold this
+ if (isa<ConstantExpr>(C1)) {
+ // There are many possible foldings we could do here. We should probably
+ // at least fold add of a pointer with an integer into the appropriate
+ // getelementptr. This will improve alias analysis a bit.
+ } else if (isa<ConstantExpr>(C2)) {
+ // If C2 is a constant expr and C1 isn't, flop them around and fold the
+ // other way if possible.
+ switch (Opcode) {
+ case Instruction::Add:
+ case Instruction::Mul:
+ case Instruction::And:
+ case Instruction::Or:
+ case Instruction::Xor:
+ // No change of opcode required.
+ return ConstantFoldBinaryInstruction(Opcode, C2, C1);
+
+ case Instruction::Shl:
+ case Instruction::LShr:
+ case Instruction::AShr:
+ case Instruction::Sub:
+ case Instruction::SDiv:
+ case Instruction::UDiv:
+ case Instruction::FDiv:
+ case Instruction::URem:
+ case Instruction::SRem:
+ case Instruction::FRem:
+ default: // These instructions cannot be flopped around.
+ break;
+ }
+ }
+
+ // We don't know how to fold this.
return 0;
}
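
Beyond the shift cleanup, the other half of this change is the rearrangement visible above: the block that handles a ConstantExpr right operand by swapping it to the left now runs only after direct ConstantInt/ConstantFP folding has been tried, and its non-commutative cases fall through to the shared "We don't know how to fold this" return instead of returning 0 early. A minimal sketch of the commutativity test that swap rests on, assuming the opcode enum from llvm/Instruction.h (the helper name is hypothetical, not LLVM API):

#include "llvm/Instruction.h"
using namespace llvm;

// Opcodes for which the patched code is willing to swap operands and
// retry the fold with the ConstantExpr on the left-hand side.
static bool canSwapOperandsForFold(unsigned Opcode) {
  switch (Opcode) {
  case Instruction::Add:
  case Instruction::Mul:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
    return true;   // commutative: folding with swapped operands is equivalent
  default:
    return false;  // Sub, the shifts, divisions and remainders are order-sensitive
  }
}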