author     Dale Johannesen <dalej@apple.com>    2008-08-25 22:34:37 +0000
committer  Dale Johannesen <dalej@apple.com>    2008-08-25 22:34:37 +0000
commit     e91a2d65596549d60c6844016ccb774bf19a4599
tree       70997c44fece748b333ab70873bf05bbb45960bf
parent     993d00682c245c5d33d03fecd71d2c774df2e9d3
Implement 32 & 64 bit versions of PPC atomic binary primitives.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@55343 91177308-0d34-0410-b5e6-96231b3b80d8
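The operations covered here are the SelectionDAG atomic load-modify-store nodes (atomic_load_add, atomic_load_sub, atomic_load_and, atomic_load_or, atomic_load_xor, atomic_load_nand). For orientation, a small, hypothetical C++ driver (not part of this commit) that reaches these nodes through the GCC-style __sync builtins on a 64-bit PowerPC target might look like this:

// Hypothetical driver (not part of this commit) exercising the newly
// supported atomic read-modify-write operations via the GCC-style
// __sync builtins, assuming a 64-bit PowerPC target.
#include <cstdio>

int main() {
  int v32 = 0;        // 32-bit ops lower through LWARX/STWCX. loops
  long long v64 = 0;  // 64-bit ops lower through LDARX/STDCX. loops

  __sync_fetch_and_add(&v32, 5);    // ATOMIC_LOAD_ADD_I32
  __sync_fetch_and_sub(&v32, 2);    // ATOMIC_LOAD_SUB_I32
  __sync_fetch_and_xor(&v32, 1);    // ATOMIC_LOAD_XOR_I32
  __sync_fetch_and_or(&v64, 0xF0);  // ATOMIC_LOAD_OR_I64
  __sync_fetch_and_and(&v64, 0x0F); // ATOMIC_LOAD_AND_I64

  std::printf("%d %lld\n", v32, v64);
  return 0;
}

Each of these calls should select one of the new ATOMIC_LOAD_*_I32/I64 pseudo instructions, which EmitInstrWithCustomInserter then expands into a load-reserve/store-conditional loop as shown in the diff below.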
-rw-r--r--  lib/Target/PowerPC/PPCISelLowering.cpp  | 125
-rw-r--r--  lib/Target/PowerPC/PPCISelLowering.h    |   3
-rw-r--r--  lib/Target/PowerPC/PPCInstr64Bit.td     |  23
-rw-r--r--  lib/Target/PowerPC/PPCInstrInfo.td      |  22
-rw-r--r--  lib/Target/TargetSelectionDAG.td        | 105
5 files changed, 230 insertions, 48 deletions
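The loop that the new EmitAtomicBinary helper emits is sketched in its own comments: l[wd]arx to load and reserve, the binary op, st[wd]cx. to store conditionally, and bne- back to the loop head if the reservation was lost. As a reading aid only (this is not the code the backend generates), the same retry structure can be modelled in portable C++, with compare_exchange_weak standing in for the lwarx/stwcx. reservation, which may likewise fail and force another iteration:

// Portable model of the retry loop EmitAtomicBinary emits
// (l[wd]arx / binary op / st[wd]cx. / bne-).  compare_exchange_weak
// plays the role of the reservation: like stwcx., it may fail, in
// which case the loop simply runs again with the refreshed value.
#include <atomic>
#include <cstdint>
#include <functional>

template <typename T, typename BinOp>
T atomic_fetch_op_model(std::atomic<T> &mem, T incr, BinOp op) {
  T dest = mem.load(std::memory_order_relaxed);   // l[wd]arx dest, ptr
  while (true) {
    T tmp = op(dest, incr);                       // add/and/or/xor/... tmp, incr, dest
    if (mem.compare_exchange_weak(dest, tmp))     // st[wd]cx. tmp, ptr
      return dest;                                // fallthrough --> exitMBB
    // bne- loopMBB: reservation lost; dest now holds the current value
  }
}

int main() {
  std::atomic<std::uint32_t> x{40};
  atomic_fetch_op_model<std::uint32_t>(x, 2u, std::plus<std::uint32_t>());
  return x.load() == 42 ? 0 : 1;
}

As in the generated loop, the value returned is the one observed by the load; that is what the $dst operand of the ATOMIC_LOAD_* pseudos carries.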
diff --git a/lib/Target/PowerPC/PPCISelLowering.cpp b/lib/Target/PowerPC/PPCISelLowering.cpp
index 07171f8..1908a05 100644
--- a/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -3861,6 +3861,60 @@ SDNode *PPCTargetLowering::ReplaceNodeResults(SDNode *N, SelectionDAG &DAG) {
 //===----------------------------------------------------------------------===//
 
 MachineBasicBlock *
+PPCTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
+                                    bool is64bit, unsigned BinOpcode) {
+  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
+
+  const BasicBlock *LLVM_BB = BB->getBasicBlock();
+  MachineFunction *F = BB->getParent();
+  MachineFunction::iterator It = BB;
+  ++It;
+
+  unsigned dest = MI->getOperand(0).getReg();
+  unsigned ptrA = MI->getOperand(1).getReg();
+  unsigned ptrB = MI->getOperand(2).getReg();
+  unsigned incr = MI->getOperand(3).getReg();
+
+  MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
+  MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
+  F->insert(It, loopMBB);
+  F->insert(It, exitMBB);
+  exitMBB->transferSuccessors(BB);
+
+  MachineRegisterInfo &RegInfo = F->getRegInfo();
+  unsigned TmpReg = RegInfo.createVirtualRegister(
+    is64bit ? (const TargetRegisterClass *) &PPC::GPRCRegClass :
+              (const TargetRegisterClass *) &PPC::G8RCRegClass);
+
+  // thisMBB:
+  //  ...
+  //  fallthrough --> loopMBB
+  BB->addSuccessor(loopMBB);
+
+  // loopMBB:
+  //   l[wd]arx dest, ptr
+  //   add r0, dest, incr
+  //   st[wd]cx. r0, ptr
+  //   bne- loopMBB
+  //   fallthrough --> exitMBB
+  BB = loopMBB;
+  BuildMI(BB, TII->get(is64bit ? PPC::LDARX : PPC::LWARX), dest)
+    .addReg(ptrA).addReg(ptrB);
+  BuildMI(BB, TII->get(BinOpcode), TmpReg).addReg(incr).addReg(dest);
+  BuildMI(BB, TII->get(is64bit ? PPC::STDCX : PPC::STWCX))
+    .addReg(TmpReg).addReg(ptrA).addReg(ptrB);
+  BuildMI(BB, TII->get(PPC::BCC))
+    .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB);
+  BB->addSuccessor(loopMBB);
+  BB->addSuccessor(exitMBB);
+
+  // exitMBB:
+  //  ...
+  BB = exitMBB;
+  return BB;
+}
+
+MachineBasicBlock *
 PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
                                                MachineBasicBlock *BB) {
   const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
@@ -3920,53 +3974,30 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
       .addReg(MI->getOperand(3).getReg()).addMBB(copy0MBB)
       .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);
   }
-  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I32 ||
-           MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I64) {
-    bool is64bit = MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I64;
-
-    unsigned dest = MI->getOperand(0).getReg();
-    unsigned ptrA = MI->getOperand(1).getReg();
-    unsigned ptrB = MI->getOperand(2).getReg();
-    unsigned incr = MI->getOperand(3).getReg();
-
-    MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
-    MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
-    F->insert(It, loopMBB);
-    F->insert(It, exitMBB);
-    exitMBB->transferSuccessors(BB);
-
-    MachineRegisterInfo &RegInfo = F->getRegInfo();
-    unsigned TmpReg = RegInfo.createVirtualRegister(
-      is64bit ? (const TargetRegisterClass *) &PPC::GPRCRegClass :
-                (const TargetRegisterClass *) &PPC::G8RCRegClass);
-
-    // thisMBB:
-    //  ...
-    //  fallthrough --> loopMBB
-    BB->addSuccessor(loopMBB);
-
-    // loopMBB:
-    //   l[wd]arx dest, ptr
-    //   add r0, dest, incr
-    //   st[wd]cx. r0, ptr
-    //   bne- loopMBB
-    //   fallthrough --> exitMBB
-    BB = loopMBB;
-    BuildMI(BB, TII->get(is64bit ? PPC::LDARX : PPC::LWARX), dest)
-      .addReg(ptrA).addReg(ptrB);
-    BuildMI(BB, TII->get(is64bit ? PPC::ADD4 : PPC::ADD8), TmpReg)
-      .addReg(incr).addReg(dest);
-    BuildMI(BB, TII->get(is64bit ? PPC::STDCX : PPC::STWCX))
-      .addReg(TmpReg).addReg(ptrA).addReg(ptrB);
-    BuildMI(BB, TII->get(PPC::BCC))
-      .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB);
-    BB->addSuccessor(loopMBB);
-    BB->addSuccessor(exitMBB);
-
-    // exitMBB:
-    //  ...
-    BB = exitMBB;
-  }
+  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I32)
+    BB = EmitAtomicBinary(MI, BB, false, PPC::ADD4);
+  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I64)
+    BB = EmitAtomicBinary(MI, BB, true, PPC::ADD8);
+  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I32)
+    BB = EmitAtomicBinary(MI, BB, false, PPC::AND);
+  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I64)
+    BB = EmitAtomicBinary(MI, BB, true, PPC::AND8);
+  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I32)
+    BB = EmitAtomicBinary(MI, BB, false, PPC::OR);
+  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I64)
+    BB = EmitAtomicBinary(MI, BB, true, PPC::OR8);
+  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I32)
+    BB = EmitAtomicBinary(MI, BB, false, PPC::XOR);
+  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I64)
+    BB = EmitAtomicBinary(MI, BB, true, PPC::XOR8);
+  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I32)
+    BB = EmitAtomicBinary(MI, BB, false, PPC::NAND);
+  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I64)
+    BB = EmitAtomicBinary(MI, BB, true, PPC::NAND8);
+  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I32)
+    BB = EmitAtomicBinary(MI, BB, false, PPC::SUBF);
+  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I64)
+    BB = EmitAtomicBinary(MI, BB, true, PPC::SUBF8);
   else if (MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I32 ||
            MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I64) {
     bool is64bit = MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I64;
diff --git a/lib/Target/PowerPC/PPCISelLowering.h b/lib/Target/PowerPC/PPCISelLowering.h
index b2f1836..2e458f9 100644
--- a/lib/Target/PowerPC/PPCISelLowering.h
+++ b/lib/Target/PowerPC/PPCISelLowering.h
@@ -282,6 +282,9 @@ namespace llvm {
     virtual MachineBasicBlock *EmitInstrWithCustomInserter(MachineInstr *MI,
                                                            MachineBasicBlock *MBB);
+    MachineBasicBlock *EmitAtomicBinary(MachineInstr *MI,
+                                        MachineBasicBlock *MBB, bool is64Bit,
+                                        unsigned BinOpcode);
 
     ConstraintType getConstraintType(const std::string &Constraint) const;
 
     std::pair<unsigned, const TargetRegisterClass*>
diff --git a/lib/Target/PowerPC/PPCInstr64Bit.td b/lib/Target/PowerPC/PPCInstr64Bit.td
index 2e235e5..75ab193 100644
--- a/lib/Target/PowerPC/PPCInstr64Bit.td
+++ b/lib/Target/PowerPC/PPCInstr64Bit.td
@@ -123,11 +123,33 @@ let usesCustomDAGSchedInserter = 1 in {
     (outs G8RC:$dst), (ins memrr:$ptr, G8RC:$incr),
     "${:comment} ATOMIC_LOAD_ADD_I64 PSEUDO!",
     [(set G8RC:$dst, (atomic_load_add_64 xoaddr:$ptr, G8RC:$incr))]>;
+  def ATOMIC_LOAD_SUB_I64 : Pseudo<
+    (outs G8RC:$dst), (ins memrr:$ptr, G8RC:$incr),
+    "${:comment} ATOMIC_LOAD_SUB_I64 PSEUDO!",
+    [(set G8RC:$dst, (atomic_load_sub_64 xoaddr:$ptr, G8RC:$incr))]>;
+  def ATOMIC_LOAD_OR_I64 : Pseudo<
+    (outs G8RC:$dst), (ins memrr:$ptr, G8RC:$incr),
+    "${:comment} ATOMIC_LOAD_OR_I64 PSEUDO!",
+    [(set G8RC:$dst, (atomic_load_or_64 xoaddr:$ptr, G8RC:$incr))]>;
+  def ATOMIC_LOAD_XOR_I64 : Pseudo<
+    (outs G8RC:$dst), (ins memrr:$ptr, G8RC:$incr),
+    "${:comment} ATOMIC_LOAD_XOR_I64 PSEUDO!",
+    [(set G8RC:$dst, (atomic_load_xor_64 xoaddr:$ptr, G8RC:$incr))]>;
+  def ATOMIC_LOAD_AND_I64 : Pseudo<
+    (outs G8RC:$dst), (ins memrr:$ptr, G8RC:$incr),
+    "${:comment} ATOMIC_LOAD_AND_I64 PSEUDO!",
+    [(set G8RC:$dst, (atomic_load_and_64 xoaddr:$ptr, G8RC:$incr))]>;
+  def ATOMIC_LOAD_NAND_I64 : Pseudo<
+    (outs G8RC:$dst), (ins memrr:$ptr, G8RC:$incr),
+    "${:comment} ATOMIC_LOAD_NAND_I64 PSEUDO!",
+    [(set G8RC:$dst, (atomic_load_nand_64 xoaddr:$ptr, G8RC:$incr))]>;
+
   def ATOMIC_CMP_SWAP_I64 : Pseudo<
     (outs G8RC:$dst), (ins memrr:$ptr, G8RC:$old, G8RC:$new),
     "${:comment} ATOMIC_CMP_SWAP_I64 PSEUDO!",
     [(set G8RC:$dst, (atomic_cmp_swap_64 xoaddr:$ptr, G8RC:$old, G8RC:$new))]>;
+
   def ATOMIC_SWAP_I64 : Pseudo<
     (outs G8RC:$dst), (ins memrr:$ptr, G8RC:$new),
     "${:comment} ATOMIC_SWAP_I64 PSEUDO!",
@@ -313,7 +335,6 @@ def SUBFIC8: DForm_2< 8, (outs G8RC:$rD), (ins G8RC:$rA, s16imm64:$imm),
 def SUBF8 : XOForm_1<31, 40, 0, (outs G8RC:$rT), (ins G8RC:$rA, G8RC:$rB),
             "subf $rT, $rA, $rB", IntGeneral,
             [(set G8RC:$rT, (sub G8RC:$rB, G8RC:$rA))]>;
-
 def SUBFC8 : XOForm_1<31, 8, 0, (outs G8RC:$rT), (ins G8RC:$rA, G8RC:$rB),
              "subfc $rT, $rA, $rB", IntGeneral,
              [(set G8RC:$rT, (subc G8RC:$rB, G8RC:$rA))]>,
diff --git a/lib/Target/PowerPC/PPCInstrInfo.td b/lib/Target/PowerPC/PPCInstrInfo.td
index debcec2..8f52458 100644
--- a/lib/Target/PowerPC/PPCInstrInfo.td
+++ b/lib/Target/PowerPC/PPCInstrInfo.td
@@ -532,11 +532,33 @@ let usesCustomDAGSchedInserter = 1 in {
     (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr),
     "${:comment} ATOMIC_LOAD_ADD_I32 PSEUDO!",
     [(set GPRC:$dst, (atomic_load_add_32 xoaddr:$ptr, GPRC:$incr))]>;
+  def ATOMIC_LOAD_SUB_I32 : Pseudo<
+    (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr),
+    "${:comment} ATOMIC_LOAD_SUB_I32 PSEUDO!",
+    [(set GPRC:$dst, (atomic_load_sub_32 xoaddr:$ptr, GPRC:$incr))]>;
+  def ATOMIC_LOAD_AND_I32 : Pseudo<
+    (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr),
+    "${:comment} ATOMIC_LOAD_AND_I32 PSEUDO!",
+    [(set GPRC:$dst, (atomic_load_and_32 xoaddr:$ptr, GPRC:$incr))]>;
+  def ATOMIC_LOAD_OR_I32 : Pseudo<
+    (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr),
+    "${:comment} ATOMIC_LOAD_OR_I32 PSEUDO!",
+    [(set GPRC:$dst, (atomic_load_or_32 xoaddr:$ptr, GPRC:$incr))]>;
+  def ATOMIC_LOAD_XOR_I32 : Pseudo<
+    (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr),
+    "${:comment} ATOMIC_LOAD_XOR_I32 PSEUDO!",
+    [(set GPRC:$dst, (atomic_load_xor_32 xoaddr:$ptr, GPRC:$incr))]>;
+  def ATOMIC_LOAD_NAND_I32 : Pseudo<
+    (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr),
+    "${:comment} ATOMIC_LOAD_NAND_I32 PSEUDO!",
+    [(set GPRC:$dst, (atomic_load_nand_32 xoaddr:$ptr, GPRC:$incr))]>;
+
   def ATOMIC_CMP_SWAP_I32 : Pseudo<
     (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$old, GPRC:$new),
     "${:comment} ATOMIC_CMP_SWAP_I32 PSEUDO!",
     [(set GPRC:$dst, (atomic_cmp_swap_32 xoaddr:$ptr, GPRC:$old, GPRC:$new))]>;
+
   def ATOMIC_SWAP_I32 : Pseudo<
     (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$new),
     "${:comment} ATOMIC_SWAP_I32 PSEUDO!",
diff --git a/lib/Target/TargetSelectionDAG.td b/lib/Target/TargetSelectionDAG.td
index a05d91b..e85589f 100644
--- a/lib/Target/TargetSelectionDAG.td
+++ b/lib/Target/TargetSelectionDAG.td
@@ -767,6 +767,111 @@ def atomic_load_add_64 : PatFrag<(ops node:$ptr, node:$inc),
   return V->getValueType(0) == MVT::i64;
 }]>;
 
+def atomic_load_sub_8 : PatFrag<(ops node:$ptr, node:$inc),
+                    (atomic_load_sub node:$ptr, node:$inc), [{
+  AtomicSDNode* V = cast<AtomicSDNode>(N);
+  return V->getValueType(0) == MVT::i8;
+}]>;
+def atomic_load_sub_16 : PatFrag<(ops node:$ptr, node:$inc),
+                    (atomic_load_sub node:$ptr, node:$inc), [{
+  AtomicSDNode* V = cast<AtomicSDNode>(N);
+  return V->getValueType(0) == MVT::i16;
+}]>;
+def atomic_load_sub_32 : PatFrag<(ops node:$ptr, node:$inc),
+                    (atomic_load_sub node:$ptr, node:$inc), [{
+  AtomicSDNode* V = cast<AtomicSDNode>(N);
+  return V->getValueType(0) == MVT::i32;
+}]>;
+def atomic_load_sub_64 : PatFrag<(ops node:$ptr, node:$inc),
+                    (atomic_load_sub node:$ptr, node:$inc), [{
+  AtomicSDNode* V = cast<AtomicSDNode>(N);
+  return V->getValueType(0) == MVT::i64;
+}]>;
+
+def atomic_load_and_8 : PatFrag<(ops node:$ptr, node:$inc),
+                    (atomic_load_and node:$ptr, node:$inc), [{
+  AtomicSDNode* V = cast<AtomicSDNode>(N);
+  return V->getValueType(0) == MVT::i8;
+}]>;
+def atomic_load_and_16 : PatFrag<(ops node:$ptr, node:$inc),
+                    (atomic_load_and node:$ptr, node:$inc), [{
+  AtomicSDNode* V = cast<AtomicSDNode>(N);
+  return V->getValueType(0) == MVT::i16;
+}]>;
+def atomic_load_and_32 : PatFrag<(ops node:$ptr, node:$inc),
+                    (atomic_load_and node:$ptr, node:$inc), [{
+  AtomicSDNode* V = cast<AtomicSDNode>(N);
+  return V->getValueType(0) == MVT::i32;
+}]>;
+def atomic_load_and_64 : PatFrag<(ops node:$ptr, node:$inc),
+                    (atomic_load_and node:$ptr, node:$inc), [{
+  AtomicSDNode* V = cast<AtomicSDNode>(N);
+  return V->getValueType(0) == MVT::i64;
+}]>;
+
+def atomic_load_or_8 : PatFrag<(ops node:$ptr, node:$inc),
+                    (atomic_load_or node:$ptr, node:$inc), [{
+  AtomicSDNode* V = cast<AtomicSDNode>(N);
+  return V->getValueType(0) == MVT::i8;
+}]>;
+def atomic_load_or_16 : PatFrag<(ops node:$ptr, node:$inc),
+                    (atomic_load_or node:$ptr, node:$inc), [{
+  AtomicSDNode* V = cast<AtomicSDNode>(N);
+  return V->getValueType(0) == MVT::i16;
+}]>;
+def atomic_load_or_32 : PatFrag<(ops node:$ptr, node:$inc),
+                    (atomic_load_or node:$ptr, node:$inc), [{
+  AtomicSDNode* V = cast<AtomicSDNode>(N);
+  return V->getValueType(0) == MVT::i32;
+}]>;
+def atomic_load_or_64 : PatFrag<(ops node:$ptr, node:$inc),
+                    (atomic_load_or node:$ptr, node:$inc), [{
+  AtomicSDNode* V = cast<AtomicSDNode>(N);
+  return V->getValueType(0) == MVT::i64;
+}]>;
+
+def atomic_load_xor_8 : PatFrag<(ops node:$ptr, node:$inc),
+                    (atomic_load_xor node:$ptr, node:$inc), [{
+  AtomicSDNode* V = cast<AtomicSDNode>(N);
+  return V->getValueType(0) == MVT::i8;
+}]>;
+def atomic_load_xor_16 : PatFrag<(ops node:$ptr, node:$inc),
+                    (atomic_load_xor node:$ptr, node:$inc), [{
+  AtomicSDNode* V = cast<AtomicSDNode>(N);
+  return V->getValueType(0) == MVT::i16;
+}]>;
+def atomic_load_xor_32 : PatFrag<(ops node:$ptr, node:$inc),
+                    (atomic_load_xor node:$ptr, node:$inc), [{
+  AtomicSDNode* V = cast<AtomicSDNode>(N);
+  return V->getValueType(0) == MVT::i32;
+}]>;
+def atomic_load_xor_64 : PatFrag<(ops node:$ptr, node:$inc),
+                    (atomic_load_xor node:$ptr, node:$inc), [{
+  AtomicSDNode* V = cast<AtomicSDNode>(N);
+  return V->getValueType(0) == MVT::i64;
+}]>;
+
+def atomic_load_nand_8 : PatFrag<(ops node:$ptr, node:$inc),
+                    (atomic_load_nand node:$ptr, node:$inc), [{
+  AtomicSDNode* V = cast<AtomicSDNode>(N);
+  return V->getValueType(0) == MVT::i8;
+}]>;
+def atomic_load_nand_16 : PatFrag<(ops node:$ptr, node:$inc),
+                    (atomic_load_nand node:$ptr, node:$inc), [{
+  AtomicSDNode* V = cast<AtomicSDNode>(N);
+  return V->getValueType(0) == MVT::i16;
+}]>;
+def atomic_load_nand_32 : PatFrag<(ops node:$ptr, node:$inc),
+                    (atomic_load_nand node:$ptr, node:$inc), [{
+  AtomicSDNode* V = cast<AtomicSDNode>(N);
+  return V->getValueType(0) == MVT::i32;
+}]>;
+def atomic_load_nand_64 : PatFrag<(ops node:$ptr, node:$inc),
+                    (atomic_load_nand node:$ptr, node:$inc), [{
+  AtomicSDNode* V = cast<AtomicSDNode>(N);
+  return V->getValueType(0) == MVT::i64;
+}]>;
+
 def atomic_swap_8 : PatFrag<(ops node:$ptr, node:$inc),
                     (atomic_swap node:$ptr, node:$inc), [{
   AtomicSDNode* V = cast<AtomicSDNode>(N);
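One operand-ordering detail is worth noting. EmitAtomicBinary always emits the binary operation as BinOpcode TmpReg, incr, dest, and the subtraction cases are dispatched to SUBF/SUBF8, whose patterns above read rT = rB - rA. With that ordering the value stored back is dest - incr (the loaded value minus the operand), and the NAND cases store ~(dest & incr). A tiny stand-alone C++ check of that arithmetic, purely illustrative and not part of the patch:

// Sanity check of the operand ordering used by EmitAtomicBinary: it emits
// "BinOpcode TmpReg, incr, dest", and SUBF/SUBF8 compute rT = rB - rA, so the
// stored value is dest - incr; NAND stores ~(dest & incr).  Plain C++ model.
#include <cassert>
#include <cstdint>

std::uint32_t subf(std::uint32_t rA, std::uint32_t rB) { return rB - rA; }
std::uint32_t nand(std::uint32_t rA, std::uint32_t rB) { return ~(rA & rB); }

int main() {
  std::uint32_t dest = 10, incr = 3;           // dest = value loaded by lwarx
  assert(subf(incr, dest) == dest - incr);     // SUBF incr, dest  ->  7
  assert(nand(incr, dest) == ~(dest & incr));  // NAND incr, dest
  return 0;
}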