author    Chris Lattner <sabre@nondot.org>  2004-06-11 04:31:10 +0000
committer Chris Lattner <sabre@nondot.org>  2004-06-11 04:31:10 +0000
commit    b4fe76cbb554e838193375fafc115cdb643d4517 (patch)
tree      1a8cda659ad1d73016cc904bd460a33790ed3cb8 /lib
parent    665e661384b2882432f50803bea7234c5c7df81a (diff)
Add direct support for the isnan intrinsic, implementing the test/Regression/CodeGen/X86/isnan.llx testcase.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@14141 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'lib')
-rw-r--r--  lib/Target/X86/InstSelectSimple.cpp  31
-rw-r--r--  lib/Target/X86/X86ISelSimple.cpp     31
2 files changed, 44 insertions, 18 deletions
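For context on the change: the lowering relies on the IEEE-754 rule that any comparison involving a NaN is unordered, so a value is a NaN exactly when it does not compare equal to itself. The patch performs that self-comparison in hardware (FpUCOMI) and reads the "unordered" answer out of the x86 parity flag (SETPr). Below is a minimal, portable sketch of the same idea; it is not part of the commit, and a second sketch after the first file's diff shows the flag-level mechanics.

#include <limits>

// Self-comparison NaN test: x != x is true only for NaNs, because every
// comparison involving a NaN is unordered. The FpUCOMI + SETPr sequence
// in the diff below performs the same check at the machine level.
static bool isnan_selfcmp(double x) {
  return x != x;  // note: fast-math style options may fold this to false
}

// Example: isnan_selfcmp(std::numeric_limits<double>::quiet_NaN()) is true,
// isnan_selfcmp(1.0) is false.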
diff --git a/lib/Target/X86/InstSelectSimple.cpp b/lib/Target/X86/InstSelectSimple.cpp
index 59d56bc..1adcd52 100644
--- a/lib/Target/X86/InstSelectSimple.cpp
+++ b/lib/Target/X86/InstSelectSimple.cpp
@@ -1628,6 +1628,7 @@ void ISel::LowerUnknownIntrinsicFunctionCalls(Function &F) {
case Intrinsic::frameaddress:
case Intrinsic::memcpy:
case Intrinsic::memset:
+ case Intrinsic::isnan:
case Intrinsic::readport:
case Intrinsic::writeport:
// We directly implement these intrinsics
@@ -1636,19 +1637,19 @@ void ISel::LowerUnknownIntrinsicFunctionCalls(Function &F) {
// On X86, memory operations are in-order. Lower this intrinsic
// into a volatile load.
Instruction *Before = CI->getPrev();
- LoadInst * LI = new LoadInst (CI->getOperand(1), "", true, CI);
- CI->replaceAllUsesWith (LI);
- BB->getInstList().erase (CI);
+ LoadInst * LI = new LoadInst(CI->getOperand(1), "", true, CI);
+ CI->replaceAllUsesWith(LI);
+ BB->getInstList().erase(CI);
break;
}
case Intrinsic::writeio: {
// On X86, memory operations are in-order. Lower this intrinsic
// into a volatile store.
Instruction *Before = CI->getPrev();
- StoreInst * LI = new StoreInst (CI->getOperand(1),
- CI->getOperand(2), true, CI);
- CI->replaceAllUsesWith (LI);
- BB->getInstList().erase (CI);
+ StoreInst *LI = new StoreInst(CI->getOperand(1),
+ CI->getOperand(2), true, CI);
+ CI->replaceAllUsesWith(LI);
+ BB->getInstList().erase(CI);
break;
}
default:
@@ -1656,12 +1657,11 @@ void ISel::LowerUnknownIntrinsicFunctionCalls(Function &F) {
Instruction *Before = CI->getPrev();
TM.getIntrinsicLowering().LowerIntrinsicCall(CI);
if (Before) { // Move iterator to instruction after call
- I = Before; ++I;
+ I = Before; ++I;
} else {
I = BB->begin();
}
}
-
}
void ISel::visitIntrinsicCall(Intrinsic::ID ID, CallInst &CI) {
@@ -1698,6 +1698,19 @@ void ISel::visitIntrinsicCall(Intrinsic::ID ID, CallInst &CI) {
}
return;
+ case Intrinsic::isnan:
+ TmpReg1 = getReg(CI.getOperand(1));
+ if (0) { // for processors prior to the P6
+ BuildMI(BB, X86::FpUCOM, 2).addReg(TmpReg1).addReg(TmpReg1);
+ BuildMI(BB, X86::FNSTSW8r, 0);
+ BuildMI(BB, X86::SAHF, 1);
+ } else {
+ BuildMI(BB, X86::FpUCOMI, 2).addReg(TmpReg1).addReg(TmpReg1);
+ }
+ TmpReg2 = getReg(CI);
+ BuildMI(BB, X86::SETPr, 0, TmpReg2);
+ return;
+
case Intrinsic::memcpy: {
assert(CI.getNumOperands() == 5 && "Illegal llvm.memcpy call!");
unsigned Align = 1;
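The isnan hunk above chooses between two ways of getting the unordered result into EFLAGS. On P6 and later processors, FpUCOMI compares the operand against itself and sets EFLAGS directly, so SETPr can read the parity flag immediately. The disabled pre-P6 path needs a detour: FpUCOM only updates the FPU status word, FNSTSW copies that word into AX, and SAHF transfers AH into EFLAGS, after which the unordered condition (C2) again lands in the parity flag. As a rough illustration of the parity-flag trick, here is a hedged sketch in GCC inline assembly; it uses the SSE2 ucomisd instruction as a stand-in for the x87 FUCOMI the patch emits, since both set PF exactly when the operands are unordered.

#include <cstdio>
#include <limits>

// Illustrative sketch, not part of the patch: compare a value with itself
// and read the unordered result out of the parity flag, mirroring what the
// FpUCOMI + SETPr pair above does with x87 registers.
static bool isnan_via_parity(double x) {
  bool nan;
  __asm__("ucomisd %1, %1\n\t"  // NaN compares unordered with itself => PF = 1
          "setp    %0"          // materialize PF as a 0/1 byte, like SETPr
          : "=q"(nan)           // byte-addressable result register
          : "x"(x)              // operand must live in an SSE register
          : "cc");              // the comparison clobbers EFLAGS
  return nan;
}

int main() {
  std::printf("%d %d\n",
              isnan_via_parity(1.0),                                        // 0
              isnan_via_parity(std::numeric_limits<double>::quiet_NaN()));  // 1
  return 0;
}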
diff --git a/lib/Target/X86/X86ISelSimple.cpp b/lib/Target/X86/X86ISelSimple.cpp
index 59d56bc..1adcd52 100644
--- a/lib/Target/X86/X86ISelSimple.cpp
+++ b/lib/Target/X86/X86ISelSimple.cpp
@@ -1628,6 +1628,7 @@ void ISel::LowerUnknownIntrinsicFunctionCalls(Function &F) {
case Intrinsic::frameaddress:
case Intrinsic::memcpy:
case Intrinsic::memset:
+ case Intrinsic::isnan:
case Intrinsic::readport:
case Intrinsic::writeport:
// We directly implement these intrinsics
@@ -1636,19 +1637,19 @@ void ISel::LowerUnknownIntrinsicFunctionCalls(Function &F) {
// On X86, memory operations are in-order. Lower this intrinsic
// into a volatile load.
Instruction *Before = CI->getPrev();
- LoadInst * LI = new LoadInst (CI->getOperand(1), "", true, CI);
- CI->replaceAllUsesWith (LI);
- BB->getInstList().erase (CI);
+ LoadInst * LI = new LoadInst(CI->getOperand(1), "", true, CI);
+ CI->replaceAllUsesWith(LI);
+ BB->getInstList().erase(CI);
break;
}
case Intrinsic::writeio: {
// On X86, memory operations are in-order. Lower this intrinsic
// into a volatile store.
Instruction *Before = CI->getPrev();
- StoreInst * LI = new StoreInst (CI->getOperand(1),
- CI->getOperand(2), true, CI);
- CI->replaceAllUsesWith (LI);
- BB->getInstList().erase (CI);
+ StoreInst *LI = new StoreInst(CI->getOperand(1),
+ CI->getOperand(2), true, CI);
+ CI->replaceAllUsesWith(LI);
+ BB->getInstList().erase(CI);
break;
}
default:
@@ -1656,12 +1657,11 @@ void ISel::LowerUnknownIntrinsicFunctionCalls(Function &F) {
Instruction *Before = CI->getPrev();
TM.getIntrinsicLowering().LowerIntrinsicCall(CI);
if (Before) { // Move iterator to instruction after call
- I = Before; ++I;
+ I = Before; ++I;
} else {
I = BB->begin();
}
}
-
}
void ISel::visitIntrinsicCall(Intrinsic::ID ID, CallInst &CI) {
@@ -1698,6 +1698,19 @@ void ISel::visitIntrinsicCall(Intrinsic::ID ID, CallInst &CI) {
}
return;
+ case Intrinsic::isnan:
+ TmpReg1 = getReg(CI.getOperand(1));
+ if (0) { // for processors prior to the P6
+ BuildMI(BB, X86::FpUCOM, 2).addReg(TmpReg1).addReg(TmpReg1);
+ BuildMI(BB, X86::FNSTSW8r, 0);
+ BuildMI(BB, X86::SAHF, 1);
+ } else {
+ BuildMI(BB, X86::FpUCOMI, 2).addReg(TmpReg1).addReg(TmpReg1);
+ }
+ TmpReg2 = getReg(CI);
+ BuildMI(BB, X86::SETPr, 0, TmpReg2);
+ return;
+
case Intrinsic::memcpy: {
assert(CI.getNumOperands() == 5 && "Illegal llvm.memcpy call!");
unsigned Align = 1;