author     Scott Michel <scottm@aero.org>    2008-01-30 02:55:46 +0000
committer  Scott Michel <scottm@aero.org>    2008-01-30 02:55:46 +0000
commit     7f9ba9bb3c969eab32118dd21f15b4b74843c5c1 (patch)
tree       7a7ddd1878e8a8dc62d910ee1e88b5c034b4c77d /lib/Target
parent     772601a8850808a66270372164941e373074493d (diff)
More cleanups for CellSPU:
- Expand tabs... (poss 80-col violations, will get them later...)
- Consolidate logic for SelectDFormAddr and SelectDForm2Addr into a single
  function, simplifying maintenance. Also reduced custom instruction generation
  for SPUvecinsert/INSERT_MASK.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@46544 91177308-0d34-0410-b5e6-96231b3b80d8
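The consolidation named above shows up in the SPUISelDAGToDAG.cpp hunk further down: SelectDFormAddr and SelectDForm2Addr now both delegate to one workhorse, DFormAddressPredicate, parameterized by an offset range. Below is a minimal, self-contained C++ sketch of that pattern only; the Addr struct, the main driver, and the +/-(1 << 13) frame-offset bounds are illustrative assumptions, not the real SelectionDAG types or SPUFrameInfo values. The actual patch passes SPUFrameInfo::minFrameOffset()/maxFrameOffset() for the D-form case and -(1 << 7) .. (1 << 7) - 1 for the D-form2 case.

#include <cstdio>

// Simplified stand-in for an address node; the real predicates operate on
// SDOperand base/index operands inside the SelectionDAG (assumption).
struct Addr { long base; long offset; };

// Workhorse: accept any base+offset address whose offset fits the given range.
// Mirrors DFormAddressPredicate(Op, N, Base, Index, minOffset, maxOffset).
static bool dFormAddressPredicate(const Addr &A, int minOffset, int maxOffset) {
  return A.offset >= minOffset && A.offset <= maxOffset;
}

// D-form proper: offsets bounded by the frame-offset limits. The bounds here
// are placeholders; the patch queries SPUFrameInfo::{min,max}FrameOffset().
static bool selectDFormAddr(const Addr &A) {
  const int minFrameOffset = -(1 << 13);     // assumed value for illustration
  const int maxFrameOffset = (1 << 13) - 1;  // assumed value for illustration
  return dFormAddressPredicate(A, minFrameOffset, maxFrameOffset);
}

// "D-form 2": the 7-bit signed displacement variant, same range as the patch:
// DFormAddressPredicate(Op, N, Disp, Base, -(1 << 7), (1 << 7) - 1).
static bool selectDForm2Addr(const Addr &A) {
  return dFormAddressPredicate(A, -(1 << 7), (1 << 7) - 1);
}

int main() {
  Addr a{0x1000, 200};
  std::printf("D-form: %d, D-form2: %d\n",
              selectDFormAddr(a) ? 1 : 0, selectDForm2Addr(a) ? 1 : 0);
  return 0;
}

The point of the refactor is that the two public predicates keep their signatures while the range check lives in one place, so future fixes to the offset logic apply to both address forms at once.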
Diffstat (limited to 'lib/Target')
-rw-r--r--  lib/Target/CellSPU/Makefile              |   2
-rw-r--r--  lib/Target/CellSPU/README.txt            |   4
-rw-r--r--  lib/Target/CellSPU/SPU.h                 |   2
-rw-r--r--  lib/Target/CellSPU/SPUAsmPrinter.cpp     |  10
-rw-r--r--  lib/Target/CellSPU/SPUISelDAGToDAG.cpp   | 270
-rw-r--r--  lib/Target/CellSPU/SPUISelLowering.cpp   | 421
-rw-r--r--  lib/Target/CellSPU/SPUISelLowering.h     |  40
-rw-r--r--  lib/Target/CellSPU/SPUInstrInfo.cpp      |   4
-rw-r--r--  lib/Target/CellSPU/SPUInstrInfo.h        |  14
-rw-r--r--  lib/Target/CellSPU/SPUInstrInfo.td       |  38
-rw-r--r--  lib/Target/CellSPU/SPUNodes.td           |   4
-rw-r--r--  lib/Target/CellSPU/SPURegisterInfo.cpp   |  30
-rw-r--r--  lib/Target/CellSPU/SPURegisterInfo.h     |   4
-rw-r--r--  lib/Target/CellSPU/SPUSubtarget.h        |   2
-rw-r--r--  lib/Target/CellSPU/SPUTargetMachine.cpp  |   2
15 files changed, 389 insertions(+), 458 deletions(-)
diff --git a/lib/Target/CellSPU/Makefile b/lib/Target/CellSPU/Makefile
index b204650..13fa781 100644
--- a/lib/Target/CellSPU/Makefile
+++ b/lib/Target/CellSPU/Makefile
@@ -13,7 +13,7 @@ TARGET = SPU
BUILT_SOURCES = SPUGenInstrNames.inc SPUGenRegisterNames.inc \
SPUGenAsmWriter.inc SPUGenCodeEmitter.inc \
- SPUGenRegisterInfo.h.inc SPUGenRegisterInfo.inc \
+ SPUGenRegisterInfo.h.inc SPUGenRegisterInfo.inc \
SPUGenInstrInfo.inc SPUGenDAGISel.inc \
SPUGenSubtarget.inc SPUGenCallingConv.inc
diff --git a/lib/Target/CellSPU/README.txt b/lib/Target/CellSPU/README.txt
index 5f63840..1d90f2a 100644
--- a/lib/Target/CellSPU/README.txt
+++ b/lib/Target/CellSPU/README.txt
@@ -28,8 +28,8 @@ SUCH DAMAGES ARE FORESEEABLE.
If you are brave enough to try this code or help to hack on it, be sure
to add 'spu' to configure's --enable-targets option, e.g.:
- ./configure <your_configure_flags_here> \
- --enable-targets=x86,x86_64,powerpc,spu
+ ./configure <your_configure_flags_here> \
+ --enable-targets=x86,x86_64,powerpc,spu
---------------------------------------------------------------------------
diff --git a/lib/Target/CellSPU/SPU.h b/lib/Target/CellSPU/SPU.h
index 87f03d1..9fbf524 100644
--- a/lib/Target/CellSPU/SPU.h
+++ b/lib/Target/CellSPU/SPU.h
@@ -35,7 +35,7 @@ namespace llvm {
inline bool isS10Constant(short Value) {
int SExtValue = ((int) Value << (32 - 10)) >> (32 - 10);
return ((Value > 0 && Value <= (1 << 9) - 1)
- || (Value < 0 && (short) SExtValue == Value));
+ || (Value < 0 && (short) SExtValue == Value));
}
inline bool isS10Constant(int Value) {
diff --git a/lib/Target/CellSPU/SPUAsmPrinter.cpp b/lib/Target/CellSPU/SPUAsmPrinter.cpp
index 4bd8673..031e997 100644
--- a/lib/Target/CellSPU/SPUAsmPrinter.cpp
+++ b/lib/Target/CellSPU/SPUAsmPrinter.cpp
@@ -102,7 +102,7 @@ namespace {
value = (value << (32 - 7)) >> (32 - 7);
assert((value >= -(1 << 8) && value <= (1 << 7) - 1)
- && "Invalid s7 argument");
+ && "Invalid s7 argument");
O << value;
}
@@ -185,7 +185,7 @@ namespace {
{
const MachineOperand &MO = MI->getOperand(OpNo);
assert(MO.isImmediate()
- && "printMemRegImmS10 first operand is not immedate");
+ && "printMemRegImmS10 first operand is not immedate");
printS10ImmOperand(MI, OpNo);
O << "(";
printOperand(MI, OpNo+1);
@@ -246,7 +246,7 @@ namespace {
if (MI->getOperand(OpNo).isImmediate()) {
int value = (int) MI->getOperand(OpNo).getImm();
assert((value >= 0 && value < 16)
- && "Invalid negated immediate rotate 7-bit argument");
+ && "Invalid negated immediate rotate 7-bit argument");
O << -value;
} else {
assert(0 &&"Invalid/non-immediate rotate amount in printRotateNeg7Imm");
@@ -257,7 +257,7 @@ namespace {
if (MI->getOperand(OpNo).isImmediate()) {
int value = (int) MI->getOperand(OpNo).getImm();
assert((value >= 0 && value < 32)
- && "Invalid negated immediate rotate 7-bit argument");
+ && "Invalid negated immediate rotate 7-bit argument");
O << -value;
} else {
assert(0 &&"Invalid/non-immediate rotate amount in printRotateNeg7Imm");
@@ -385,7 +385,7 @@ bool SPUAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
}
bool SPUAsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI,
- unsigned OpNo,
+ unsigned OpNo,
unsigned AsmVariant,
const char *ExtraCode) {
if (ExtraCode && ExtraCode[0])
diff --git a/lib/Target/CellSPU/SPUISelDAGToDAG.cpp b/lib/Target/CellSPU/SPUISelDAGToDAG.cpp
index 3905d55..8bde663 100644
--- a/lib/Target/CellSPU/SPUISelDAGToDAG.cpp
+++ b/lib/Target/CellSPU/SPUISelDAGToDAG.cpp
@@ -165,26 +165,25 @@ namespace {
struct valtype_map_s {
MVT::ValueType VT;
- unsigned ldresult_ins; /// LDRESULT instruction (0 = undefined)
- int prefslot_byte; /// Byte offset of the "preferred" slot
- unsigned insmask_ins; /// Insert mask instruction for a-form
+ unsigned ldresult_ins; /// LDRESULT instruction (0 = undefined)
+ int prefslot_byte; /// Byte offset of the "preferred" slot
};
const valtype_map_s valtype_map[] = {
- { MVT::i1, 0, 3, 0 },
- { MVT::i8, SPU::ORBIr8, 3, 0 },
- { MVT::i16, SPU::ORHIr16, 2, 0 },
- { MVT::i32, SPU::ORIr32, 0, 0 },
- { MVT::i64, SPU::ORIr64, 0, 0 },
- { MVT::f32, 0, 0, 0 },
- { MVT::f64, 0, 0, 0 },
+ { MVT::i1, 0, 3 },
+ { MVT::i8, SPU::ORBIr8, 3 },
+ { MVT::i16, SPU::ORHIr16, 2 },
+ { MVT::i32, SPU::ORIr32, 0 },
+ { MVT::i64, SPU::ORIr64, 0 },
+ { MVT::f32, 0, 0 },
+ { MVT::f64, 0, 0 },
// vector types... (sigh!)
- { MVT::v16i8, 0, 0, SPU::CBD },
- { MVT::v8i16, 0, 0, SPU::CHD },
- { MVT::v4i32, 0, 0, SPU::CWD },
- { MVT::v2i64, 0, 0, 0 },
- { MVT::v4f32, 0, 0, SPU::CWD },
- { MVT::v2f64, 0, 0, 0 }
+ { MVT::v16i8, 0, 0 },
+ { MVT::v8i16, 0, 0 },
+ { MVT::v4i32, 0, 0 },
+ { MVT::v2i64, 0, 0 },
+ { MVT::v4f32, 0, 0 },
+ { MVT::v2f64, 0, 0 }
};
const size_t n_valtype_map = sizeof(valtype_map) / sizeof(valtype_map[0]);
@@ -194,8 +193,8 @@ namespace {
const valtype_map_s *retval = 0;
for (size_t i = 0; i < n_valtype_map; ++i) {
if (valtype_map[i].VT == VT) {
- retval = valtype_map + i;
- break;
+ retval = valtype_map + i;
+ break;
}
}
@@ -203,8 +202,8 @@ namespace {
#ifndef NDEBUG
if (retval == 0) {
cerr << "SPUISelDAGToDAG.cpp: getValueTypeMapEntry returns NULL for "
- << MVT::getValueTypeString(VT)
- << "\n";
+ << MVT::getValueTypeString(VT)
+ << "\n";
abort();
}
#endif
@@ -259,42 +258,46 @@ public:
/// target-specific node if it hasn't already been changed.
SDNode *Select(SDOperand Op);
- /// Return true if the address N is a RI7 format address [r+imm]
- bool SelectDForm2Addr(SDOperand Op, SDOperand N, SDOperand &Disp,
- SDOperand &Base);
-
//! Returns true if the address N is an A-form (local store) address
bool SelectAFormAddr(SDOperand Op, SDOperand N, SDOperand &Base,
- SDOperand &Index);
+ SDOperand &Index);
//! D-form address predicate
bool SelectDFormAddr(SDOperand Op, SDOperand N, SDOperand &Base,
- SDOperand &Index);
+ SDOperand &Index);
+
+ /// Alternate D-form address using i7 offset predicate
+ bool SelectDForm2Addr(SDOperand Op, SDOperand N, SDOperand &Disp,
+ SDOperand &Base);
+
+ /// D-form address selection workhorse
+ bool DFormAddressPredicate(SDOperand Op, SDOperand N, SDOperand &Disp,
+ SDOperand &Base, int minOffset, int maxOffset);
//! Address predicate if N can be expressed as an indexed [r+r] operation.
bool SelectXFormAddr(SDOperand Op, SDOperand N, SDOperand &Base,
- SDOperand &Index);
+ SDOperand &Index);
/// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
/// inline asm expressions.
virtual bool SelectInlineAsmMemoryOperand(const SDOperand &Op,
- char ConstraintCode,
- std::vector<SDOperand> &OutOps,
- SelectionDAG &DAG) {
+ char ConstraintCode,
+ std::vector<SDOperand> &OutOps,
+ SelectionDAG &DAG) {
SDOperand Op0, Op1;
switch (ConstraintCode) {
default: return true;
case 'm': // memory
if (!SelectDFormAddr(Op, Op, Op0, Op1)
- && !SelectAFormAddr(Op, Op, Op0, Op1))
- SelectXFormAddr(Op, Op, Op0, Op1);
+ && !SelectAFormAddr(Op, Op, Op0, Op1))
+ SelectXFormAddr(Op, Op, Op0, Op1);
break;
case 'o': // offsetable
if (!SelectDFormAddr(Op, Op, Op0, Op1)
- && !SelectAFormAddr(Op, Op, Op0, Op1)) {
- Op0 = Op;
- AddToISelQueue(Op0); // r+0.
- Op1 = getSmallIPtrImm(0);
+ && !SelectAFormAddr(Op, Op, Op0, Op1)) {
+ Op0 = Op;
+ AddToISelQueue(Op0); // r+0.
+ Op1 = getSmallIPtrImm(0);
}
break;
case 'v': // not offsetable
@@ -346,52 +349,6 @@ SPUDAGToDAGISel::InstructionSelectBasicBlock(SelectionDAG &DAG)
ScheduleAndEmitDAG(DAG);
}
-bool
-SPUDAGToDAGISel::SelectDForm2Addr(SDOperand Op, SDOperand N, SDOperand &Disp,
- SDOperand &Base) {
- unsigned Opc = N.getOpcode();
- unsigned VT = N.getValueType();
- MVT::ValueType PtrVT = SPUtli.getPointerTy();
- ConstantSDNode *CN = 0;
- int Imm;
-
- if (Opc == ISD::ADD) {
- SDOperand Op0 = N.getOperand(0);
- SDOperand Op1 = N.getOperand(1);
- if (Op1.getOpcode() == ISD::Constant ||
- Op1.getOpcode() == ISD::TargetConstant) {
- CN = cast<ConstantSDNode>(Op1);
- Imm = int(CN->getValue());
- if (Imm <= 0xff) {
- Disp = CurDAG->getTargetConstant(Imm, SPUtli.getPointerTy());
- Base = Op0;
- return true;
- }
- }
- } else if (Opc == ISD::GlobalAddress
- || Opc == ISD::TargetGlobalAddress
- || Opc == ISD::Register) {
- // Plain old local store address:
- Disp = CurDAG->getTargetConstant(0, VT);
- Base = N;
- return true;
- } else if (Opc == SPUISD::IndirectAddr) {
- SDOperand Op1 = N.getOperand(1);
- if (Op1.getOpcode() == ISD::TargetConstant
- || Op1.getOpcode() == ISD::Constant) {
- CN = cast<ConstantSDNode>(N.getOperand(1));
- assert(CN != 0 && "SelectIndirectAddr/SPUISD::DForm2Addr expecting constant");
- Imm = unsigned(CN->getValue());
- if (Imm < 0xff) {
- Disp = CurDAG->getTargetConstant(CN->getValue(), PtrVT);
- Base = N.getOperand(0);
- return true;
- }
- }
- }
- return false;
-}
-
/*!
\arg Op The ISD instructio operand
\arg N The address to be tested
@@ -400,7 +357,7 @@ SPUDAGToDAGISel::SelectDForm2Addr(SDOperand Op, SDOperand N, SDOperand &Disp,
*/
bool
SPUDAGToDAGISel::SelectAFormAddr(SDOperand Op, SDOperand N, SDOperand &Base,
- SDOperand &Index) {
+ SDOperand &Index) {
// These match the addr256k operand type:
MVT::ValueType OffsVT = MVT::i16;
SDOperand Zero = CurDAG->getTargetConstant(0, OffsVT);
@@ -450,6 +407,12 @@ SPUDAGToDAGISel::SelectAFormAddr(SDOperand Op, SDOperand N, SDOperand &Base,
return false;
}
+bool
+SPUDAGToDAGISel::SelectDForm2Addr(SDOperand Op, SDOperand N, SDOperand &Disp,
+ SDOperand &Base) {
+ return DFormAddressPredicate(Op, N, Disp, Base, -(1 << 7), (1 << 7) - 1);
+}
+
/*!
\arg Op The ISD instruction (ignored)
\arg N The address to be tested
@@ -464,7 +427,16 @@ SPUDAGToDAGISel::SelectAFormAddr(SDOperand Op, SDOperand N, SDOperand &Base,
*/
bool
SPUDAGToDAGISel::SelectDFormAddr(SDOperand Op, SDOperand N, SDOperand &Base,
- SDOperand &Index) {
+ SDOperand &Index) {
+ return DFormAddressPredicate(Op, N, Base, Index,
+ SPUFrameInfo::minFrameOffset(),
+ SPUFrameInfo::maxFrameOffset());
+}
+
+bool
+SPUDAGToDAGISel::DFormAddressPredicate(SDOperand Op, SDOperand N, SDOperand &Base,
+ SDOperand &Index, int minOffset,
+ int maxOffset) {
unsigned Opc = N.getOpcode();
unsigned PtrTy = SPUtli.getPointerTy();
@@ -472,8 +444,8 @@ SPUDAGToDAGISel::SelectDFormAddr(SDOperand Op, SDOperand N, SDOperand &Base,
// Stack frame index must be less than 512 (divided by 16):
FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N);
DEBUG(cerr << "SelectDFormAddr: ISD::FrameIndex = "
- << FI->getIndex() << "\n");
- if (FI->getIndex() < SPUFrameInfo::maxFrameOffset()) {
+ << FI->getIndex() << "\n");
+ if (FI->getIndex() < maxOffset) {
Base = CurDAG->getTargetConstant(0, PtrTy);
Index = CurDAG->getTargetFrameIndex(FI->getIndex(), PtrTy);
return true;
@@ -498,13 +470,12 @@ SPUDAGToDAGISel::SelectDFormAddr(SDOperand Op, SDOperand N, SDOperand &Base,
DEBUG(cerr << "SelectDFormAddr: ISD::ADD offset = " << offset
<< " frame index = " << FI->getIndex() << "\n");
- if (FI->getIndex() < SPUFrameInfo::maxFrameOffset()) {
+ if (FI->getIndex() < maxOffset) {
Base = CurDAG->getTargetConstant(offset, PtrTy);
Index = CurDAG->getTargetFrameIndex(FI->getIndex(), PtrTy);
return true;
}
- } else if (offset > SPUFrameInfo::minFrameOffset()
- && offset < SPUFrameInfo::maxFrameOffset()) {
+ } else if (offset > minOffset && offset < maxOffset) {
Base = CurDAG->getTargetConstant(offset, PtrTy);
Index = Op0;
return true;
@@ -519,13 +490,12 @@ SPUDAGToDAGISel::SelectDFormAddr(SDOperand Op, SDOperand N, SDOperand &Base,
DEBUG(cerr << "SelectDFormAddr: ISD::ADD offset = " << offset
<< " frame index = " << FI->getIndex() << "\n");
- if (FI->getIndex() < SPUFrameInfo::maxFrameOffset()) {
+ if (FI->getIndex() < maxOffset) {
Base = CurDAG->getTargetConstant(offset, PtrTy);
Index = CurDAG->getTargetFrameIndex(FI->getIndex(), PtrTy);
return true;
}
- } else if (offset > SPUFrameInfo::minFrameOffset()
- && offset < SPUFrameInfo::maxFrameOffset()) {
+ } else if (offset > minOffset && offset < maxOffset) {
Base = CurDAG->getTargetConstant(offset, PtrTy);
Index = Op1;
return true;
@@ -535,39 +505,41 @@ SPUDAGToDAGISel::SelectDFormAddr(SDOperand Op, SDOperand N, SDOperand &Base,
// Indirect with constant offset -> D-Form address
const SDOperand Op0 = N.getOperand(0);
const SDOperand Op1 = N.getOperand(1);
- SDOperand Zero = CurDAG->getTargetConstant(0, N.getValueType());
- if (Op1.getOpcode() == ISD::Constant
- || Op1.getOpcode() == ISD::TargetConstant) {
- ConstantSDNode *CN = cast<ConstantSDNode>(Op1);
- int32_t offset = int32_t(CN->getSignExtended());
- if (offset > SPUFrameInfo::minFrameOffset()
- && offset < SPUFrameInfo::maxFrameOffset()) {
- Base = CurDAG->getTargetConstant(CN->getValue(), PtrTy);
- Index = Op0;
- return true;
- }
- } else if (Op0.getOpcode() == ISD::Constant
- || Op0.getOpcode() == ISD::TargetConstant) {
- ConstantSDNode *CN = cast<ConstantSDNode>(Op0);
- int32_t offset = int32_t(CN->getSignExtended());
- if (offset > SPUFrameInfo::minFrameOffset()
- && offset < SPUFrameInfo::maxFrameOffset()) {
- Base = CurDAG->getTargetConstant(CN->getValue(), PtrTy);
- Index = Op1;
- return true;
- }
- } else if (Op0.getOpcode() == SPUISD::Hi
- && Op1.getOpcode() == SPUISD::Lo) {
+ if (Op0.getOpcode() == SPUISD::Hi
+ && Op1.getOpcode() == SPUISD::Lo) {
// (SPUindirect (SPUhi <arg>, 0), (SPUlo <arg>, 0))
Base = CurDAG->getTargetConstant(0, PtrTy);
Index = N;
return true;
+ } else if (isa<ConstantSDNode>(Op0) || isa<ConstantSDNode>(Op1)) {
+ int32_t offset = 0;
+ SDOperand idxOp;
+
+ if (isa<ConstantSDNode>(Op1)) {
+ ConstantSDNode *CN = cast<ConstantSDNode>(Op1);
+ offset = int32_t(CN->getSignExtended());
+ idxOp = Op0;
+ } else if (isa<ConstantSDNode>(Op0)) {
+ ConstantSDNode *CN = cast<ConstantSDNode>(Op0);
+ offset = int32_t(CN->getSignExtended());
+ idxOp = Op1;
+ }
+
+ if (offset >= minOffset && offset <= maxOffset) {
+ Base = CurDAG->getTargetConstant(offset, PtrTy);
+ Index = idxOp;
+ return true;
+ }
}
} else if (Opc == SPUISD::AFormAddr) {
Base = CurDAG->getTargetConstant(0, N.getValueType());
Index = N;
return true;
+ } else if (Opc == SPUISD::LDRESULT) {
+ Base = CurDAG->getTargetConstant(0, N.getValueType());
+ Index = N;
+ return true;
}
return false;
}
@@ -584,7 +556,7 @@ SPUDAGToDAGISel::SelectDFormAddr(SDOperand Op, SDOperand N, SDOperand &Base,
*/
bool
SPUDAGToDAGISel::SelectXFormAddr(SDOperand Op, SDOperand N, SDOperand &Base,
- SDOperand &Index) {
+ SDOperand &Index) {
if (SelectAFormAddr(Op, N, Base, Index)
|| SelectDFormAddr(Op, N, Base, Index))
return false;
@@ -637,38 +609,6 @@ SPUDAGToDAGISel::Select(SDOperand Op) {
n_ops = 2;
}
}
- } else if (Opc == SPUISD::INSERT_MASK) {
- SDOperand Op0 = Op.getOperand(0);
- if (Op0.getOpcode() == SPUISD::AFormAddr) {
- // (SPUvecinsmask (SPUaform <arg>, 0)) ->
- // (CBD|CHD|CWD 0, arg)
- const valtype_map_s *vtm = getValueTypeMapEntry(OpVT);
- ConstantSDNode *CN = cast<ConstantSDNode>(Op0.getOperand(1));
- assert(vtm->insmask_ins != 0 && "missing insert mask instruction");
- NewOpc = vtm->insmask_ins;
- Ops[0] = CurDAG->getTargetConstant(CN->getValue(), Op0.getValueType());
- Ops[1] = Op0;
- n_ops = 2;
-
- AddToISelQueue(Op0);
- } else if (Op0.getOpcode() == ISD::FrameIndex) {
- // (SPUvecinsmask <fi>) ->
- // (CBD|CHD|CWD 0, <fi>)
- const valtype_map_s *vtm = getValueTypeMapEntry(OpVT);
- NewOpc = vtm->insmask_ins;
- Ops[0] = CurDAG->getTargetConstant(0, Op0.getValueType());
- Ops[1] = Op0;
- n_ops = 2;
- } else if (isHighLow(Op0)) {
- // (SPUvecinsmask (SPUindirect (SPUhi <arg>, 0), (SPUlow <arg>, 0))) ->
- // (CBD|CHD|CWD 0, arg)
- const valtype_map_s *vtm = getValueTypeMapEntry(OpVT);
- NewOpc = vtm->insmask_ins;
- Ops[0] = CurDAG->getTargetConstant(0, Op0.getValueType());
- Ops[1] = Op0;
- n_ops = 2;
- AddToISelQueue(Op0);
- }
} else if (Opc == SPUISD::LDRESULT) {
// Custom select instructions for LDRESULT
unsigned VT = N->getValueType(0);
@@ -682,12 +622,12 @@ SPUDAGToDAGISel::Select(SDOperand Op) {
const valtype_map_s *vtm = getValueTypeMapEntry(VT);
if (vtm->ldresult_ins == 0) {
- cerr << "LDRESULT for unsupported type: "
- << MVT::getValueTypeString(VT)
- << "\n";
- abort();
+ cerr << "LDRESULT for unsupported type: "
+ << MVT::getValueTypeString(VT)
+ << "\n";
+ abort();
} else
- Opc = vtm->ldresult_ins;
+ Opc = vtm->ldresult_ins;
AddToISelQueue(Zero);
Result = CurDAG->getTargetNode(Opc, VT, MVT::Other, Arg, Zero, Chain);
@@ -702,17 +642,13 @@ SPUDAGToDAGISel::Select(SDOperand Op) {
return Result;
} else if (Opc == SPUISD::IndirectAddr) {
SDOperand Op0 = Op.getOperand(0);
- if (Op0.getOpcode() == SPUISD::LDRESULT
- || Op0.getOpcode() == SPUISD::AFormAddr) {
- // (IndirectAddr (LDRESULT|AFormAddr, imm))
+ if (Op0.getOpcode() == SPUISD::LDRESULT) {
+ /* || Op0.getOpcode() == SPUISD::AFormAddr) */
+ // (IndirectAddr (LDRESULT, imm))
SDOperand Op1 = Op.getOperand(1);
MVT::ValueType VT = Op.getValueType();
- DEBUG(cerr << "CellSPU: IndirectAddr("
- << (Op0.getOpcode() == SPUISD::LDRESULT
- ? "LDRESULT"
- : "AFormAddr")
- << ", imm):\nOp0 = ");
+ DEBUG(cerr << "CellSPU: IndirectAddr(LDRESULT, imm):\nOp0 = ");
DEBUG(Op.getOperand(0).Val->dump(CurDAG));
DEBUG(cerr << "\nOp1 = ");
DEBUG(Op.getOperand(1).Val->dump(CurDAG));
@@ -721,13 +657,13 @@ SPUDAGToDAGISel::Select(SDOperand Op) {
if (Op1.getOpcode() == ISD::Constant) {
ConstantSDNode *CN = cast<ConstantSDNode>(Op1);
Op1 = CurDAG->getTargetConstant(CN->getValue(), VT);
+ NewOpc = (isI32IntS10Immediate(CN) ? SPU::AIr32 : SPU::Ar32);
+ AddToISelQueue(Op0);
+ AddToISelQueue(Op1);
+ Ops[0] = Op0;
+ Ops[1] = Op1;
+ n_ops = 2;
}
- AddToISelQueue(Op0);
- AddToISelQueue(Op1);
- NewOpc = SPU::AIr32;
- Ops[0] = Op0;
- Ops[1] = Op1;
- n_ops = 2;
}
}
diff --git a/lib/Target/CellSPU/SPUISelLowering.cpp b/lib/Target/CellSPU/SPUISelLowering.cpp
index c7d7f97..407b8e6 100644
--- a/lib/Target/CellSPU/SPUISelLowering.cpp
+++ b/lib/Target/CellSPU/SPUISelLowering.cpp
@@ -39,8 +39,8 @@ namespace {
//! MVT::ValueType mapping to useful data for Cell SPU
struct valtype_map_s {
- const MVT::ValueType valtype;
- const int prefslot_byte;
+ const MVT::ValueType valtype;
+ const int prefslot_byte;
};
const valtype_map_s valtype_map[] = {
@@ -61,16 +61,16 @@ namespace {
for (size_t i = 0; i < n_valtype_map; ++i) {
if (valtype_map[i].valtype == VT) {
- retval = valtype_map + i;
- break;
+ retval = valtype_map + i;
+ break;
}
}
#ifndef NDEBUG
if (retval == 0) {
cerr << "getValueTypeMapEntry returns NULL for "
- << MVT::getValueTypeString(VT)
- << "\n";
+ << MVT::getValueTypeString(VT)
+ << "\n";
abort();
}
#endif
@@ -106,7 +106,7 @@ namespace {
{
const unsigned Opc = Op.getOpcode();
return (Opc == ISD::Register
- || Opc == SPUISD::LDRESULT);
+ || Opc == SPUISD::LDRESULT);
}
}
@@ -508,7 +508,7 @@ AlignedLoad(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST,
if (!isMemoryOperand(basePtr) || (alignOffs & ~0xf) != 0) {
basePtr = DAG.getNode(ISD::ADD, PtrVT,
basePtr,
- DAG.getConstant((alignOffs & ~0xf), PtrVT));
+ DAG.getConstant((alignOffs & ~0xf), PtrVT));
}
// Emit the vector load:
@@ -526,7 +526,7 @@ AlignedLoad(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST,
// Add the offset
basePtr = DAG.getNode(ISD::ADD, PtrVT, basePtr,
- DAG.getConstant((alignOffs & ~0xf), PtrVT));
+ DAG.getConstant((alignOffs & ~0xf), PtrVT));
was16aligned = false;
return DAG.getLoad(MVT::v16i8, chain, basePtr,
LSN->getSrcValue(), LSN->getSrcValueOffset(),
@@ -570,10 +570,10 @@ LowerLOAD(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
if (was16aligned) {
Ops[2] = DAG.getConstant(rotamt, MVT::i16);
} else {
- MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
+ MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
LoadSDNode *LN1 = cast<LoadSDNode>(result);
Ops[2] = DAG.getNode(ISD::ADD, PtrVT, LN1->getBasePtr(),
- DAG.getConstant(rotamt, PtrVT));
+ DAG.getConstant(rotamt, PtrVT));
}
result = DAG.getNode(SPUISD::ROTBYTES_LEFT_CHAINED, vecvts, Ops, 3);
@@ -616,10 +616,9 @@ LowerLOAD(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
}
SDVTList retvts = DAG.getVTList(OpVT, MVT::Other);
- SDOperand retops[3] = {
+ SDOperand retops[2] = {
result,
- the_chain,
- DAG.getConstant(alignment, MVT::i32)
+ the_chain
};
result = DAG.getNode(SPUISD::LDRESULT, retvts,
@@ -683,8 +682,8 @@ LowerSTORE(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
SDOperand result;
if (StVT != VT
- && (theValue.getOpcode() == ISD::AssertZext
- || theValue.getOpcode() == ISD::AssertSext)) {
+ && (theValue.getOpcode() == ISD::AssertZext
+ || theValue.getOpcode() == ISD::AssertSext)) {
// Drill down and get the value for zero- and sign-extended
// quantities
theValue = theValue.getOperand(0);
@@ -722,9 +721,9 @@ LowerSTORE(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
insertEltOp = DAG.getNode(SPUISD::INSERT_MASK, stVecVT, insertEltPtr);
result = DAG.getNode(SPUISD::SHUFB, vecVT,
- DAG.getNode(ISD::SCALAR_TO_VECTOR, vecVT, theValue),
- alignLoadVec,
- DAG.getNode(ISD::BIT_CONVERT, vecVT, insertEltOp));
+ DAG.getNode(ISD::SCALAR_TO_VECTOR, vecVT, theValue),
+ alignLoadVec,
+ DAG.getNode(ISD::BIT_CONVERT, vecVT, insertEltOp));
result = DAG.getStore(the_chain, result, basePtr,
LN->getSrcValue(), LN->getSrcValueOffset(),
@@ -818,7 +817,7 @@ LowerGlobalAddress(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
}
} else {
cerr << "LowerGlobalAddress: Relocation model other than static not "
- << "supported.\n";
+ << "supported.\n";
abort();
/*NOTREACHED*/
}
@@ -839,12 +838,12 @@ LowerConstant(SDOperand Op, SelectionDAG &DAG) {
if (VT == MVT::i64) {
SDOperand T = DAG.getConstant(CN->getValue(), MVT::i64);
return DAG.getNode(SPUISD::EXTRACT_ELT0, VT,
- DAG.getNode(ISD::BUILD_VECTOR, MVT::v2i64, T, T));
+ DAG.getNode(ISD::BUILD_VECTOR, MVT::v2i64, T, T));
} else {
cerr << "LowerConstant: unhandled constant type "
- << MVT::getValueTypeString(VT)
- << "\n";
+ << MVT::getValueTypeString(VT)
+ << "\n";
abort();
/*NOTREACHED*/
}
@@ -864,16 +863,16 @@ LowerConstantFP(SDOperand Op, SelectionDAG &DAG) {
ConstantFPSDNode *FP = cast<ConstantFPSDNode>(Op.Val);
assert((FP != 0) &&
- "LowerConstantFP: Node is not ConstantFPSDNode");
+ "LowerConstantFP: Node is not ConstantFPSDNode");
if (VT == MVT::f32) {
float targetConst = FP->getValueAPF().convertToFloat();
return DAG.getNode(SPUISD::SFPConstant, VT,
- DAG.getTargetConstantFP(targetConst, VT));
+ DAG.getTargetConstantFP(targetConst, VT));
} else if (VT == MVT::f64) {
uint64_t dbits = DoubleToBits(FP->getValueAPF().convertToDouble());
return DAG.getNode(ISD::BIT_CONVERT, VT,
- LowerConstant(DAG.getConstant(dbits, MVT::i64), DAG));
+ LowerConstant(DAG.getConstant(dbits, MVT::i64), DAG));
}
return SDOperand();
@@ -926,7 +925,7 @@ LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG, int &VarArgsFrameIndex)
switch (ObjectVT) {
default: {
cerr << "LowerFORMAL_ARGUMENTS Unhandled argument type: "
- << MVT::getValueTypeString(ObjectVT)
+ << MVT::getValueTypeString(ObjectVT)
<< "\n";
abort();
}
@@ -1126,7 +1125,7 @@ LowerCALL(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
RegsToPass.push_back(std::make_pair(ArgRegs[ArgRegIdx++], Arg));
} else {
MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0));
- ArgOffset += StackSlotSize;
+ ArgOffset += StackSlotSize;
}
break;
case MVT::f32:
@@ -1135,7 +1134,7 @@ LowerCALL(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
RegsToPass.push_back(std::make_pair(ArgRegs[ArgRegIdx++], Arg));
} else {
MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0));
- ArgOffset += StackSlotSize;
+ ArgOffset += StackSlotSize;
}
break;
case MVT::v4f32:
@@ -1146,7 +1145,7 @@ LowerCALL(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
RegsToPass.push_back(std::make_pair(ArgRegs[ArgRegIdx++], Arg));
} else {
MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0));
- ArgOffset += StackSlotSize;
+ ArgOffset += StackSlotSize;
}
break;
}
@@ -1378,19 +1377,19 @@ SDOperand SPU::get_vec_i16imm(SDNode *N, SelectionDAG &DAG,
int SExtValue = ((Value & 0xffff) << 16) >> 16;
if (Value == SExtValue)
- return DAG.getConstant(Value, ValueType);
+ return DAG.getConstant(Value, ValueType);
} else if (ValueType == MVT::i16) {
short Value = (short) CN->getValue();
int SExtValue = ((int) Value << 16) >> 16;
if (Value == (short) SExtValue)
- return DAG.getConstant(Value, ValueType);
+ return DAG.getConstant(Value, ValueType);
} else if (ValueType == MVT::i64) {
int64_t Value = CN->getValue();
int64_t SExtValue = ((Value & 0xffff) << (64 - 16)) >> (64 - 16);
if (Value == SExtValue)
- return DAG.getConstant(Value, ValueType);
+ return DAG.getConstant(Value, ValueType);
}
}
@@ -1405,7 +1404,7 @@ SDOperand SPU::get_vec_i10imm(SDNode *N, SelectionDAG &DAG,
if (ConstantSDNode *CN = getVecImm(N)) {
int Value = (int) CN->getValue();
if ((ValueType == MVT::i32 && isS10Constant(Value))
- || (ValueType == MVT::i16 && isS10Constant((short) Value)))
+ || (ValueType == MVT::i16 && isS10Constant((short) Value)))
return DAG.getConstant(Value, ValueType);
}
@@ -1424,11 +1423,11 @@ SDOperand SPU::get_vec_i8imm(SDNode *N, SelectionDAG &DAG,
if (ConstantSDNode *CN = getVecImm(N)) {
int Value = (int) CN->getValue();
if (ValueType == MVT::i16
- && Value <= 0xffff /* truncated from uint64_t */
- && ((short) Value >> 8) == ((short) Value & 0xff))
+ && Value <= 0xffff /* truncated from uint64_t */
+ && ((short) Value >> 8) == ((short) Value & 0xff))
return DAG.getConstant(Value & 0xff, ValueType);
else if (ValueType == MVT::i8
- && (Value & 0xff) == Value)
+ && (Value & 0xff) == Value)
return DAG.getConstant(Value, ValueType);
}
@@ -1443,8 +1442,8 @@ SDOperand SPU::get_ILHUvec_imm(SDNode *N, SelectionDAG &DAG,
if (ConstantSDNode *CN = getVecImm(N)) {
uint64_t Value = CN->getValue();
if ((ValueType == MVT::i32
- && ((unsigned) Value & 0xffff0000) == (unsigned) Value)
- || (ValueType == MVT::i64 && (Value & 0xffff0000) == Value))
+ && ((unsigned) Value & 0xffff0000) == (unsigned) Value)
+ || (ValueType == MVT::i64 && (Value & 0xffff0000) == Value))
return DAG.getConstant(Value >> 16, ValueType);
}
@@ -1496,8 +1495,8 @@ static bool GetConstantBuildVectorBits(SDNode *BV, uint64_t VectorBits[2],
} else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
const APFloat &apf = CN->getValueAPF();
EltBits = (CN->getValueType(0) == MVT::f32
- ? FloatToBits(apf.convertToFloat())
- : DoubleToBits(apf.convertToDouble()));
+ ? FloatToBits(apf.convertToFloat())
+ : DoubleToBits(apf.convertToDouble()));
} else {
// Nonconstant element.
return true;
@@ -1517,7 +1516,7 @@ static bool GetConstantBuildVectorBits(SDNode *BV, uint64_t VectorBits[2],
/// SplatSize = 1 byte.
static bool isConstantSplat(const uint64_t Bits128[2],
const uint64_t Undef128[2],
- int MinSplatBits,
+ int MinSplatBits,
uint64_t &SplatBits, uint64_t &SplatUndef,
int &SplatSize) {
// Don't let undefs prevent splats from matching. See if the top 64-bits are
@@ -1535,34 +1534,34 @@ static bool isConstantSplat(const uint64_t Bits128[2],
// Check that the top 32-bits are the same as the lower 32-bits, ignoring
// undefs.
if ((Bits64 & (~Undef64 >> 32)) == ((Bits64 >> 32) & ~Undef64)) {
- if (MinSplatBits < 32) {
-
- // If the top 16-bits are different than the lower 16-bits, ignoring
- // undefs, we have an i32 splat.
- if ((Bits32 & (~Undef32 >> 16)) == ((Bits32 >> 16) & ~Undef32)) {
- if (MinSplatBits < 16) {
- // If the top 8-bits are different than the lower 8-bits, ignoring
- // undefs, we have an i16 splat.
- if ((Bits16 & (uint16_t(~Undef16) >> 8)) == ((Bits16 >> 8) & ~Undef16)) {
- // Otherwise, we have an 8-bit splat.
- SplatBits = uint8_t(Bits16) | uint8_t(Bits16 >> 8);
- SplatUndef = uint8_t(Undef16) & uint8_t(Undef16 >> 8);
- SplatSize = 1;
- return true;
- }
- } else {
- SplatBits = Bits16;
- SplatUndef = Undef16;
- SplatSize = 2;
- return true;
- }
- }
- } else {
- SplatBits = Bits32;
- SplatUndef = Undef32;
- SplatSize = 4;
- return true;
- }
+ if (MinSplatBits < 32) {
+
+ // If the top 16-bits are different than the lower 16-bits, ignoring
+ // undefs, we have an i32 splat.
+ if ((Bits32 & (~Undef32 >> 16)) == ((Bits32 >> 16) & ~Undef32)) {
+ if (MinSplatBits < 16) {
+ // If the top 8-bits are different than the lower 8-bits, ignoring
+ // undefs, we have an i16 splat.
+ if ((Bits16 & (uint16_t(~Undef16) >> 8)) == ((Bits16 >> 8) & ~Undef16)) {
+ // Otherwise, we have an 8-bit splat.
+ SplatBits = uint8_t(Bits16) | uint8_t(Bits16 >> 8);
+ SplatUndef = uint8_t(Undef16) & uint8_t(Undef16 >> 8);
+ SplatSize = 1;
+ return true;
+ }
+ } else {
+ SplatBits = Bits16;
+ SplatUndef = Undef16;
+ SplatSize = 2;
+ return true;
+ }
+ }
+ } else {
+ SplatBits = Bits32;
+ SplatUndef = Undef32;
+ SplatSize = 4;
+ return true;
+ }
}
} else {
SplatBits = Bits128[0];
@@ -1592,7 +1591,7 @@ static SDOperand LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) {
int SplatSize;
if (GetConstantBuildVectorBits(Op.Val, VectorBits, UndefBits)
|| !isConstantSplat(VectorBits, UndefBits,
- MVT::getSizeInBits(MVT::getVectorElementType(VT)),
+ MVT::getSizeInBits(MVT::getVectorElementType(VT)),
SplatBits, SplatUndef, SplatSize))
return SDOperand(); // Not a constant vector, not a splat.
@@ -1601,21 +1600,21 @@ static SDOperand LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) {
case MVT::v4f32: {
uint32_t Value32 = SplatBits;
assert(SplatSize == 4
- && "LowerBUILD_VECTOR: Unexpected floating point vector element.");
+ && "LowerBUILD_VECTOR: Unexpected floating point vector element.");
// NOTE: pretend the constant is an integer. LLVM won't load FP constants
SDOperand T = DAG.getConstant(Value32, MVT::i32);
return DAG.getNode(ISD::BIT_CONVERT, MVT::v4f32,
- DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, T, T, T, T));
+ DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, T, T, T, T));
break;
}
case MVT::v2f64: {
uint64_t f64val = SplatBits;
assert(SplatSize == 8
- && "LowerBUILD_VECTOR: 64-bit float vector element: unexpected size.");
+ && "LowerBUILD_VECTOR: 64-bit float vector element: unexpected size.");
// NOTE: pretend the constant is an integer. LLVM won't load FP constants
SDOperand T = DAG.getConstant(f64val, MVT::i64);
return DAG.getNode(ISD::BIT_CONVERT, MVT::v2f64,
- DAG.getNode(ISD::BUILD_VECTOR, MVT::v2i64, T, T));
+ DAG.getNode(ISD::BUILD_VECTOR, MVT::v2i64, T, T));
break;
}
case MVT::v16i8: {
@@ -1665,69 +1664,69 @@ static SDOperand LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) {
// Create lower vector if not a special pattern
if (!lower_special) {
- SDOperand LO32C = DAG.getConstant(lower, MVT::i32);
- LO32 = DAG.getNode(ISD::BIT_CONVERT, VT,
- DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32,
- LO32C, LO32C, LO32C, LO32C));
+ SDOperand LO32C = DAG.getConstant(lower, MVT::i32);
+ LO32 = DAG.getNode(ISD::BIT_CONVERT, VT,
+ DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32,
+ LO32C, LO32C, LO32C, LO32C));
}
// Create upper vector if not a special pattern
if (!upper_special) {
- SDOperand HI32C = DAG.getConstant(upper, MVT::i32);
- HI32 = DAG.getNode(ISD::BIT_CONVERT, VT,
- DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32,
- HI32C, HI32C, HI32C, HI32C));
+ SDOperand HI32C = DAG.getConstant(upper, MVT::i32);
+ HI32 = DAG.getNode(ISD::BIT_CONVERT, VT,
+ DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32,
+ HI32C, HI32C, HI32C, HI32C));
}
// If either upper or lower are special, then the two input operands are
// the same (basically, one of them is a "don't care")
if (lower_special)
- LO32 = HI32;
+ LO32 = HI32;
if (upper_special)
- HI32 = LO32;
+ HI32 = LO32;
if (lower_special && upper_special) {
- // Unhappy situation... both upper and lower are special, so punt with
- // a target constant:
+ // Unhappy situation... both upper and lower are special, so punt with
+ // a target constant:
SDOperand Zero = DAG.getConstant(0, MVT::i32);
- HI32 = LO32 = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, Zero, Zero,
+ HI32 = LO32 = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, Zero, Zero,
Zero, Zero);
}
for (int i = 0; i < 4; ++i) {
- for (int j = 0; j < 4; ++j) {
- SDOperand V;
- bool process_upper, process_lower;
- uint64_t val = 0;
-
- process_upper = (upper_special && (i & 1) == 0);
- process_lower = (lower_special && (i & 1) == 1);
-
- if (process_upper || process_lower) {
- if ((process_upper && upper == 0)
- || (process_lower && lower == 0))
- val = 0x80;
- else if ((process_upper && upper == 0xffffffff)
- || (process_lower && lower == 0xffffffff))
- val = 0xc0;
- else if ((process_upper && upper == 0x80000000)
- || (process_lower && lower == 0x80000000))
- val = (j == 0 ? 0xe0 : 0x80);
- } else
- val = i * 4 + j + ((i & 1) * 16);
-
- ShufBytes.push_back(DAG.getConstant(val, MVT::i8));
- }
+ for (int j = 0; j < 4; ++j) {
+ SDOperand V;
+ bool process_upper, process_lower;
+ uint64_t val = 0;
+
+ process_upper = (upper_special && (i & 1) == 0);
+ process_lower = (lower_special && (i & 1) == 1);
+
+ if (process_upper || process_lower) {
+ if ((process_upper && upper == 0)
+ || (process_lower && lower == 0))
+ val = 0x80;
+ else if ((process_upper && upper == 0xffffffff)
+ || (process_lower && lower == 0xffffffff))
+ val = 0xc0;
+ else if ((process_upper && upper == 0x80000000)
+ || (process_lower && lower == 0x80000000))
+ val = (j == 0 ? 0xe0 : 0x80);
+ } else
+ val = i * 4 + j + ((i & 1) * 16);
+
+ ShufBytes.push_back(DAG.getConstant(val, MVT::i8));
+ }
}
return DAG.getNode(SPUISD::SHUFB, VT, HI32, LO32,
- DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8,
- &ShufBytes[0], ShufBytes.size()));
+ DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8,
+ &ShufBytes[0], ShufBytes.size()));
} else {
// For zero, this can be lowered efficiently via v4i32 BUILD_VECTOR
SDOperand Zero = DAG.getConstant(0, MVT::i32);
return DAG.getNode(ISD::BIT_CONVERT, VT,
- DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32,
- Zero, Zero, Zero, Zero));
+ DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32,
+ Zero, Zero, Zero, Zero));
}
}
}
@@ -1804,8 +1803,8 @@ static SDOperand LowerVECTOR_SHUFFLE(SDOperand Op, SelectionDAG &DAG) {
// Copy register's contents as index in INSERT_MASK:
SDOperand ShufMaskOp =
DAG.getNode(SPUISD::INSERT_MASK, V1.getValueType(),
- DAG.getTargetConstant(V2Elt, MVT::i32),
- DAG.getCopyFromReg(InitTempReg, VReg, PtrVT));
+ DAG.getTargetConstant(V2Elt, MVT::i32),
+ DAG.getCopyFromReg(InitTempReg, VReg, PtrVT));
// Use shuffle mask in SHUFB synthetic instruction:
return DAG.getNode(SPUISD::SHUFB, V1.getValueType(), V2, V1, ShufMaskOp);
} else {
@@ -1816,24 +1815,24 @@ static SDOperand LowerVECTOR_SHUFFLE(SDOperand Op, SelectionDAG &DAG) {
for (unsigned i = 0, e = PermMask.getNumOperands(); i != e; ++i) {
unsigned SrcElt;
if (PermMask.getOperand(i).getOpcode() == ISD::UNDEF)
- SrcElt = 0;
+ SrcElt = 0;
else
- SrcElt = cast<ConstantSDNode>(PermMask.getOperand(i))->getValue();
+ SrcElt = cast<ConstantSDNode>(PermMask.getOperand(i))->getValue();
for (unsigned j = 0; j != BytesPerElement; ++j) {
- ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement+j,
- MVT::i8));
+ ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement+j,
+ MVT::i8));
}
}
SDOperand VPermMask = DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8,
- &ResultMask[0], ResultMask.size());
+ &ResultMask[0], ResultMask.size());
return DAG.getNode(SPUISD::SHUFB, V1.getValueType(), V1, V2, VPermMask);
}
}
static SDOperand LowerSCALAR_TO_VECTOR(SDOperand Op, SelectionDAG &DAG) {
- SDOperand Op0 = Op.getOperand(0); // Op0 = the scalar
+ SDOperand Op0 = Op.getOperand(0); // Op0 = the scalar
if (Op0.Val->getOpcode() == ISD::Constant) {
// For a constant, build the appropriate constant vector, which will
@@ -1847,7 +1846,7 @@ static SDOperand LowerSCALAR_TO_VECTOR(SDOperand Op, SelectionDAG &DAG) {
// Create a constant vector:
switch (Op.getValueType()) {
default: assert(0 && "Unexpected constant value type in "
- "LowerSCALAR_TO_VECTOR");
+ "LowerSCALAR_TO_VECTOR");
case MVT::v16i8: n_copies = 16; VT = MVT::i8; break;
case MVT::v8i16: n_copies = 8; VT = MVT::i16; break;
case MVT::v4i32: n_copies = 4; VT = MVT::i32; break;
@@ -1861,7 +1860,7 @@ static SDOperand LowerSCALAR_TO_VECTOR(SDOperand Op, SelectionDAG &DAG) {
ConstVecValues.push_back(CValue);
return DAG.getNode(ISD::BUILD_VECTOR, Op.getValueType(),
- &ConstVecValues[0], ConstVecValues.size());
+ &ConstVecValues[0], ConstVecValues.size());
} else {
// Otherwise, copy the value from one register to another:
switch (Op0.getValueType()) {
@@ -1912,24 +1911,24 @@ static SDOperand LowerVectorMUL(SDOperand Op, SelectionDAG &DAG) {
SDOperand FSMBOp =
DAG.getCopyToReg(Chain, FSMBIreg,
- DAG.getNode(SPUISD::FSMBI, MVT::v8i16,
- DAG.getConstant(0xcccc, MVT::i32)));
+ DAG.getNode(SPUISD::FSMBI, MVT::v8i16,
+ DAG.getConstant(0xcccc, MVT::i32)));
SDOperand HHProd =
DAG.getCopyToReg(FSMBOp, HiProdReg,
- DAG.getNode(SPUISD::MPYHH, MVT::v8i16, rA, rB));
+ DAG.getNode(SPUISD::MPYHH, MVT::v8i16, rA, rB));
SDOperand HHProd_v4i32 =
DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32,
- DAG.getCopyFromReg(HHProd, HiProdReg, MVT::v4i32));
+ DAG.getCopyFromReg(HHProd, HiProdReg, MVT::v4i32));
return DAG.getNode(SPUISD::SELB, MVT::v8i16,
- DAG.getNode(SPUISD::MPY, MVT::v8i16, rA, rB),
- DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(),
- DAG.getNode(SPUISD::VEC_SHL, MVT::v4i32,
- HHProd_v4i32,
- DAG.getConstant(16, MVT::i16))),
- DAG.getCopyFromReg(FSMBOp, FSMBIreg, MVT::v4i32));
+ DAG.getNode(SPUISD::MPY, MVT::v8i16, rA, rB),
+ DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(),
+ DAG.getNode(SPUISD::VEC_SHL, MVT::v4i32,
+ HHProd_v4i32,
+ DAG.getConstant(16, MVT::i16))),
+ DAG.getCopyFromReg(FSMBOp, FSMBIreg, MVT::v4i32));
}
// This M00sE is N@stI! (apologies to Monty Python)
@@ -1952,8 +1951,8 @@ static SDOperand LowerVectorMUL(SDOperand Op, SelectionDAG &DAG) {
SDOperand LLProd =
DAG.getNode(SPUISD::MPY, MVT::v8i16,
- DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, rA),
- DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, rB));
+ DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, rA),
+ DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, rB));
SDOperand rALH = DAG.getNode(SPUISD::VEC_SRA, MVT::v8i16, rA, c8);
@@ -1961,70 +1960,70 @@ static SDOperand LowerVectorMUL(SDOperand Op, SelectionDAG &DAG) {
SDOperand LHProd =
DAG.getNode(SPUISD::VEC_SHL, MVT::v8i16,
- DAG.getNode(SPUISD::MPY, MVT::v8i16, rALH, rBLH), c8);
+ DAG.getNode(SPUISD::MPY, MVT::v8i16, rALH, rBLH), c8);
SDOperand FSMBdef_2222 =
DAG.getCopyToReg(Chain, FSMBreg_2222,
- DAG.getNode(SPUISD::FSMBI, MVT::v8i16,
- DAG.getConstant(0x2222, MVT::i32)));
+ DAG.getNode(SPUISD::FSMBI, MVT::v8i16,
+ DAG.getConstant(0x2222, MVT::i32)));
SDOperand FSMBuse_2222 =
DAG.getCopyFromReg(FSMBdef_2222, FSMBreg_2222, MVT::v4i32);
SDOperand LoProd_1 =
DAG.getCopyToReg(Chain, LoProd_reg,
- DAG.getNode(SPUISD::SELB, MVT::v8i16, LLProd, LHProd,
- FSMBuse_2222));
+ DAG.getNode(SPUISD::SELB, MVT::v8i16, LLProd, LHProd,
+ FSMBuse_2222));
SDOperand LoProdMask = DAG.getConstant(0xffff, MVT::i32);
SDOperand LoProd =
DAG.getNode(ISD::AND, MVT::v4i32,
- DAG.getCopyFromReg(LoProd_1, LoProd_reg, MVT::v4i32),
- DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32,
- LoProdMask, LoProdMask,
- LoProdMask, LoProdMask));
+ DAG.getCopyFromReg(LoProd_1, LoProd_reg, MVT::v4i32),
+ DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32,
+ LoProdMask, LoProdMask,
+ LoProdMask, LoProdMask));
SDOperand rAH =
DAG.getNode(SPUISD::VEC_SRA, MVT::v4i32,
- DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32, rA), c16);
+ DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32, rA), c16);
SDOperand rBH =
DAG.getNode(SPUISD::VEC_SRA, MVT::v4i32,
- DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32, rB), c16);
+ DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32, rB), c16);
SDOperand HLProd =
DAG.getNode(SPUISD::MPY, MVT::v8i16,
- DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, rAH),
- DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, rBH));
+ DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, rAH),
+ DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, rBH));
SDOperand HHProd_1 =
DAG.getNode(SPUISD::MPY, MVT::v8i16,
- DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16,
- DAG.getNode(SPUISD::VEC_SRA, MVT::v4i32, rAH, c8)),
- DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16,
- DAG.getNode(SPUISD::VEC_SRA, MVT::v4i32, rBH, c8)));
+ DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16,
+ DAG.getNode(SPUISD::VEC_SRA, MVT::v4i32, rAH, c8)),
+ DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16,
+ DAG.getNode(SPUISD::VEC_SRA, MVT::v4i32, rBH, c8)));
SDOperand HHProd =
DAG.getCopyToReg(Chain, HiProd_reg,
- DAG.getNode(SPUISD::SELB, MVT::v8i16,
- HLProd,
- DAG.getNode(SPUISD::VEC_SHL, MVT::v8i16, HHProd_1, c8),
- FSMBuse_2222));
+ DAG.getNode(SPUISD::SELB, MVT::v8i16,
+ HLProd,
+ DAG.getNode(SPUISD::VEC_SHL, MVT::v8i16, HHProd_1, c8),
+ FSMBuse_2222));
SDOperand HiProd =
DAG.getNode(SPUISD::VEC_SHL, MVT::v4i32,
- DAG.getCopyFromReg(HHProd, HiProd_reg, MVT::v4i32), c16);
+ DAG.getCopyFromReg(HHProd, HiProd_reg, MVT::v4i32), c16);
return DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8,
- DAG.getNode(ISD::OR, MVT::v4i32,
- LoProd, HiProd));
+ DAG.getNode(ISD::OR, MVT::v4i32,
+ LoProd, HiProd));
}
default:
cerr << "CellSPU: Unknown vector multiplication, got "
<< MVT::getValueTypeString(Op.getValueType())
- << "\n";
+ << "\n";
abort();
/*NOTREACHED*/
}
@@ -2056,24 +2055,24 @@ static SDOperand LowerFDIVf32(SDOperand Op, SelectionDAG &DAG) {
// (Floating Interpolate (FP Reciprocal Estimate B))
SDOperand BRcpl =
DAG.getCopyToReg(DAG.getEntryNode(), VRegBR,
- DAG.getNode(SPUISD::FPInterp, VT, B,
- DAG.getNode(SPUISD::FPRecipEst, VT, B)));
+ DAG.getNode(SPUISD::FPInterp, VT, B,
+ DAG.getNode(SPUISD::FPRecipEst, VT, B)));
// Computes A * BRcpl and stores in a temporary register
SDOperand AxBRcpl =
DAG.getCopyToReg(BRcpl, VRegC,
- DAG.getNode(ISD::FMUL, VT, A,
- DAG.getCopyFromReg(BRcpl, VRegBR, VT)));
+ DAG.getNode(ISD::FMUL, VT, A,
+ DAG.getCopyFromReg(BRcpl, VRegBR, VT)));
// What's the Chain variable do? It's magic!
// TODO: set Chain = Op(0).getEntryNode()
return DAG.getNode(ISD::FADD, VT,
- DAG.getCopyFromReg(AxBRcpl, VRegC, VT),
- DAG.getNode(ISD::FMUL, VT,
- DAG.getCopyFromReg(AxBRcpl, VRegBR, VT),
- DAG.getNode(ISD::FSUB, VT, A,
- DAG.getNode(ISD::FMUL, VT, B,
- DAG.getCopyFromReg(AxBRcpl, VRegC, VT)))));
+ DAG.getCopyFromReg(AxBRcpl, VRegC, VT),
+ DAG.getNode(ISD::FMUL, VT,
+ DAG.getCopyFromReg(AxBRcpl, VRegBR, VT),
+ DAG.getNode(ISD::FSUB, VT, A,
+ DAG.getNode(ISD::FMUL, VT, B,
+ DAG.getCopyFromReg(AxBRcpl, VRegC, VT)))));
}
static SDOperand LowerEXTRACT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) {
@@ -2126,7 +2125,7 @@ static SDOperand LowerEXTRACT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) {
}
assert(prefslot_begin != -1 && prefslot_end != -1 &&
- "LowerEXTRACT_VECTOR_ELT: preferred slots uninitialized");
+ "LowerEXTRACT_VECTOR_ELT: preferred slots uninitialized");
for (int i = 0; i < 16; ++i) {
// zero fill uppper part of preferred slot, don't care about the
@@ -2135,9 +2134,9 @@ static SDOperand LowerEXTRACT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) {
if (i <= prefslot_end) {
mask_val =
- ((i < prefslot_begin)
- ? 0x80
- : elt_byte + (i - prefslot_begin));
+ ((i < prefslot_begin)
+ ? 0x80
+ : elt_byte + (i - prefslot_begin));
ShufMask[i] = DAG.getConstant(mask_val, MVT::i8);
} else
@@ -2146,13 +2145,13 @@ static SDOperand LowerEXTRACT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) {
SDOperand ShufMaskVec =
DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8,
- &ShufMask[0],
- sizeof(ShufMask) / sizeof(ShufMask[0]));
+ &ShufMask[0],
+ sizeof(ShufMask) / sizeof(ShufMask[0]));
return DAG.getNode(SPUISD::EXTRACT_ELT0, VT,
- DAG.getNode(SPUISD::SHUFB, N.getValueType(),
- N, N, ShufMaskVec));
-
+ DAG.getNode(SPUISD::SHUFB, N.getValueType(),
+ N, N, ShufMaskVec));
+
}
static SDOperand LowerINSERT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) {
@@ -2176,7 +2175,7 @@ static SDOperand LowerINSERT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) {
DAG.getNode(ISD::ADD, PtrVT,
PtrBase,
DAG.getConstant(CN->getValue(),
- PtrVT))));
+ PtrVT))));
return result;
}
@@ -2283,7 +2282,7 @@ LowerByteImmed(SDOperand Op, SelectionDAG &DAG) {
ConstVec = Op.getOperand(1);
Arg = Op.getOperand(0);
if (ConstVec.Val->getOpcode() == ISD::BIT_CONVERT) {
- ConstVec = ConstVec.getOperand(0);
+ ConstVec = ConstVec.getOperand(0);
}
}
}
@@ -2295,19 +2294,19 @@ LowerByteImmed(SDOperand Op, SelectionDAG &DAG) {
int SplatSize;
if (!GetConstantBuildVectorBits(ConstVec.Val, VectorBits, UndefBits)
- && isConstantSplat(VectorBits, UndefBits,
- MVT::getSizeInBits(MVT::getVectorElementType(VT)),
- SplatBits, SplatUndef, SplatSize)) {
+ && isConstantSplat(VectorBits, UndefBits,
+ MVT::getSizeInBits(MVT::getVectorElementType(VT)),
+ SplatBits, SplatUndef, SplatSize)) {
SDOperand tcVec[16];
SDOperand tc = DAG.getTargetConstant(SplatBits & 0xff, MVT::i8);
const size_t tcVecSize = sizeof(tcVec) / sizeof(tcVec[0]);
// Turn the BUILD_VECTOR into a set of target constants:
for (size_t i = 0; i < tcVecSize; ++i)
- tcVec[i] = tc;
+ tcVec[i] = tc;
return DAG.getNode(Op.Val->getOpcode(), VT, Arg,
- DAG.getNode(ISD::BUILD_VECTOR, VT, tcVec, tcVecSize));
+ DAG.getNode(ISD::BUILD_VECTOR, VT, tcVec, tcVecSize));
}
}
@@ -2321,7 +2320,7 @@ static SDOperand LowerMUL(SDOperand Op, SelectionDAG &DAG, unsigned VT,
default:
cerr << "CellSPU: Unknown LowerMUL value type, got "
<< MVT::getValueTypeString(Op.getValueType())
- << "\n";
+ << "\n";
abort();
/*NOTREACHED*/
@@ -2330,10 +2329,10 @@ static SDOperand LowerMUL(SDOperand Op, SelectionDAG &DAG, unsigned VT,
SDOperand rB = Op.getOperand(1);
return DAG.getNode(ISD::ADD, MVT::i32,
- DAG.getNode(ISD::ADD, MVT::i32,
- DAG.getNode(SPUISD::MPYH, MVT::i32, rA, rB),
- DAG.getNode(SPUISD::MPYH, MVT::i32, rB, rA)),
- DAG.getNode(SPUISD::MPYU, MVT::i32, rA, rB));
+ DAG.getNode(ISD::ADD, MVT::i32,
+ DAG.getNode(SPUISD::MPYH, MVT::i32, rA, rB),
+ DAG.getNode(SPUISD::MPYH, MVT::i32, rB, rA)),
+ DAG.getNode(SPUISD::MPYU, MVT::i32, rA, rB));
}
}
@@ -2379,18 +2378,18 @@ static SDOperand LowerCTPOP(SDOperand Op, SelectionDAG &DAG) {
// CNTB_reg, SUM1_reg become associated:
SDOperand CNTB_result =
DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i16, CNTB, Elt0);
-
+
SDOperand CNTB_rescopy =
DAG.getCopyToReg(CNTB_result, CNTB_reg, CNTB_result);
SDOperand Tmp1 = DAG.getCopyFromReg(CNTB_rescopy, CNTB_reg, MVT::i16);
return DAG.getNode(ISD::AND, MVT::i16,
- DAG.getNode(ISD::ADD, MVT::i16,
- DAG.getNode(ISD::SRL, MVT::i16,
- Tmp1, Shift1),
- Tmp1),
- Mask0);
+ DAG.getNode(ISD::ADD, MVT::i16,
+ DAG.getNode(ISD::SRL, MVT::i16,
+ Tmp1, Shift1),
+ Tmp1),
+ Mask0);
}
case MVT::i32: {
@@ -2413,28 +2412,28 @@ static SDOperand LowerCTPOP(SDOperand Op, SelectionDAG &DAG) {
// CNTB_reg, SUM1_reg become associated:
SDOperand CNTB_result =
DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i32, CNTB, Elt0);
-
+
SDOperand CNTB_rescopy =
DAG.getCopyToReg(CNTB_result, CNTB_reg, CNTB_result);
SDOperand Comp1 =
DAG.getNode(ISD::SRL, MVT::i32,
- DAG.getCopyFromReg(CNTB_rescopy, CNTB_reg, MVT::i32), Shift1);
+ DAG.getCopyFromReg(CNTB_rescopy, CNTB_reg, MVT::i32), Shift1);
SDOperand Sum1 =
DAG.getNode(ISD::ADD, MVT::i32,
- Comp1, DAG.getCopyFromReg(CNTB_rescopy, CNTB_reg, MVT::i32));
+ Comp1, DAG.getCopyFromReg(CNTB_rescopy, CNTB_reg, MVT::i32));
SDOperand Sum1_rescopy =
DAG.getCopyToReg(CNTB_result, SUM1_reg, Sum1);
SDOperand Comp2 =
DAG.getNode(ISD::SRL, MVT::i32,
- DAG.getCopyFromReg(Sum1_rescopy, SUM1_reg, MVT::i32),
- Shift2);
+ DAG.getCopyFromReg(Sum1_rescopy, SUM1_reg, MVT::i32),
+ Shift2);
SDOperand Sum2 =
DAG.getNode(ISD::ADD, MVT::i32, Comp2,
- DAG.getCopyFromReg(Sum1_rescopy, SUM1_reg, MVT::i32));
+ DAG.getCopyFromReg(Sum1_rescopy, SUM1_reg, MVT::i32));
return DAG.getNode(ISD::AND, MVT::i32, Sum2, Mask0);
}
@@ -2558,7 +2557,7 @@ SPUTargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const
#endif
const SPUSubtarget *ST = SPUTM.getSubtargetImpl();
SelectionDAG &DAG = DCI.DAG;
- SDOperand N0 = N->getOperand(0); // everything has at least one operand
+ SDOperand N0 = N->getOperand(0); // everything has at least one operand
switch (N->getOpcode()) {
default: break;
@@ -2683,11 +2682,11 @@ SPUTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
void
SPUTargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op,
- uint64_t Mask,
- uint64_t &KnownZero,
- uint64_t &KnownOne,
- const SelectionDAG &DAG,
- unsigned Depth ) const {
+ uint64_t Mask,
+ uint64_t &KnownZero,
+ uint64_t &KnownOne,
+ const SelectionDAG &DAG,
+ unsigned Depth ) const {
KnownZero = 0;
KnownOne = 0;
}
diff --git a/lib/Target/CellSPU/SPUISelLowering.h b/lib/Target/CellSPU/SPUISelLowering.h
index 916f2c9..3ac768a 100644
--- a/lib/Target/CellSPU/SPUISelLowering.h
+++ b/lib/Target/CellSPU/SPUISelLowering.h
@@ -38,34 +38,34 @@ namespace llvm {
CALL, ///< CALL instruction
SHUFB, ///< Vector shuffle (permute)
INSERT_MASK, ///< Insert element shuffle mask
- CNTB, ///< Count leading ones in bytes
+ CNTB, ///< Count leading ones in bytes
PROMOTE_SCALAR, ///< Promote scalar->vector
EXTRACT_ELT0, ///< Extract element 0
- EXTRACT_ELT0_CHAINED, ///< Extract element 0, with chain
+ EXTRACT_ELT0_CHAINED, ///< Extract element 0, with chain
EXTRACT_I1_ZEXT, ///< Extract element 0 as i1, zero extend
EXTRACT_I1_SEXT, ///< Extract element 0 as i1, sign extend
EXTRACT_I8_ZEXT, ///< Extract element 0 as i8, zero extend
EXTRACT_I8_SEXT, ///< Extract element 0 as i8, sign extend
- MPY, ///< 16-bit Multiply (low parts of a 32-bit)
- MPYU, ///< Multiply Unsigned
- MPYH, ///< Multiply High
- MPYHH, ///< Multiply High-High
- VEC_SHL, ///< Vector shift left
- VEC_SRL, ///< Vector shift right (logical)
- VEC_SRA, ///< Vector shift right (arithmetic)
- VEC_ROTL, ///< Vector rotate left
- VEC_ROTR, ///< Vector rotate right
- ROTBYTES_RIGHT_Z, ///< Vector rotate right, by bytes, zero fill
+ MPY, ///< 16-bit Multiply (low parts of a 32-bit)
+ MPYU, ///< Multiply Unsigned
+ MPYH, ///< Multiply High
+ MPYHH, ///< Multiply High-High
+ VEC_SHL, ///< Vector shift left
+ VEC_SRL, ///< Vector shift right (logical)
+ VEC_SRA, ///< Vector shift right (arithmetic)
+ VEC_ROTL, ///< Vector rotate left
+ VEC_ROTR, ///< Vector rotate right
+ ROTBYTES_RIGHT_Z, ///< Vector rotate right, by bytes, zero fill
ROTBYTES_RIGHT_S, ///< Vector rotate right, by bytes, sign fill
- ROTBYTES_LEFT, ///< Rotate bytes (loads -> ROTQBYI)
- ROTBYTES_LEFT_CHAINED, ///< Rotate bytes (loads -> ROTQBYI), with chain
- FSMBI, ///< Form Select Mask for Bytes, Immediate
- SELB, ///< Select bits -> (b & mask) | (a & ~mask)
- SFPConstant, ///< Single precision floating point constant
+ ROTBYTES_LEFT, ///< Rotate bytes (loads -> ROTQBYI)
+ ROTBYTES_LEFT_CHAINED, ///< Rotate bytes (loads -> ROTQBYI), with chain
+ FSMBI, ///< Form Select Mask for Bytes, Immediate
+ SELB, ///< Select bits -> (b & mask) | (a & ~mask)
+ SFPConstant, ///< Single precision floating point constant
FPInterp, ///< Floating point interpolate
- FPRecipEst, ///< Floating point reciprocal estimate
- SEXT32TO64, ///< Sign-extended 32-bit const -> 64-bits
- LAST_SPUISD ///< Last user-defined instruction
+ FPRecipEst, ///< Floating point reciprocal estimate
+ SEXT32TO64, ///< Sign-extended 32-bit const -> 64-bits
+ LAST_SPUISD ///< Last user-defined instruction
};
}
diff --git a/lib/Target/CellSPU/SPUInstrInfo.cpp b/lib/Target/CellSPU/SPUInstrInfo.cpp
index e72cd12..ac9b9b8 100644
--- a/lib/Target/CellSPU/SPUInstrInfo.cpp
+++ b/lib/Target/CellSPU/SPUInstrInfo.cpp
@@ -408,8 +408,8 @@ SPUInstrInfo::foldMemoryOperand(MachineInstr *MI,
if (OpNum == 0) { // move -> store
unsigned InReg = MI->getOperand(1).getReg();
if (FrameIndex < SPUFrameInfo::maxFrameOffset()) {
- NewMI = addFrameReference(BuildMI(TII.get(SPU::STQDr32)).addReg(InReg),
- FrameIndex);
+ NewMI = addFrameReference(BuildMI(TII.get(SPU::STQDr32)).addReg(InReg),
+ FrameIndex);
}
} else { // move -> load
unsigned OutReg = MI->getOperand(0).getReg();
diff --git a/lib/Target/CellSPU/SPUInstrInfo.h b/lib/Target/CellSPU/SPUInstrInfo.h
index e2ecf9b..10c39a0 100644
--- a/lib/Target/CellSPU/SPUInstrInfo.h
+++ b/lib/Target/CellSPU/SPUInstrInfo.h
@@ -40,8 +40,8 @@ namespace llvm {
// leave the source and dest operands in the passed parameters.
//
virtual bool isMoveInstr(const MachineInstr& MI,
- unsigned& sourceReg,
- unsigned& destReg) const;
+ unsigned& sourceReg,
+ unsigned& destReg) const;
unsigned isLoadFromStackSlot(MachineInstr *MI, int &FrameIndex) const;
unsigned isStoreToStackSlot(MachineInstr *MI, int &FrameIndex) const;
@@ -60,9 +60,9 @@ namespace llvm {
//! Store a register to an address, based on its register class
virtual void storeRegToAddr(MachineFunction &MF, unsigned SrcReg, bool isKill,
- SmallVectorImpl<MachineOperand> &Addr,
- const TargetRegisterClass *RC,
- SmallVectorImpl<MachineInstr*> &NewMIs) const;
+ SmallVectorImpl<MachineOperand> &Addr,
+ const TargetRegisterClass *RC,
+ SmallVectorImpl<MachineInstr*> &NewMIs) const;
//! Load a register from a stack slot, based on its register class.
virtual void loadRegFromStackSlot(MachineBasicBlock &MBB,
@@ -72,8 +72,8 @@ namespace llvm {
//! Loqad a register from an address, based on its register class
virtual void loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
- SmallVectorImpl<MachineOperand> &Addr,
- const TargetRegisterClass *RC,
+ SmallVectorImpl<MachineOperand> &Addr,
+ const TargetRegisterClass *RC,
SmallVectorImpl<MachineInstr*> &NewMIs) const;
//! Fold spills into load/store instructions
diff --git a/lib/Target/CellSPU/SPUInstrInfo.td b/lib/Target/CellSPU/SPUInstrInfo.td
index b21468a..196d241 100644
--- a/lib/Target/CellSPU/SPUInstrInfo.td
+++ b/lib/Target/CellSPU/SPUInstrInfo.td
@@ -194,16 +194,16 @@ multiclass StoreDForms
class StoreAFormVec<ValueType vectype>
: RI16Form<0b0010010, (outs), (ins VECREG:$rT, addr256k:$src),
- "stqa\t$rT, $src",
- LoadStore,
- [(store (vectype VECREG:$rT), aform_addr:$src)]>
+ "stqa\t$rT, $src",
+ LoadStore,
+ [(store (vectype VECREG:$rT), aform_addr:$src)]>
{ }
class StoreAForm<RegisterClass rclass>
: RI16Form<0b001001, (outs), (ins rclass:$rT, addr256k:$src),
- "stqa\t$rT, $src",
- LoadStore,
- [(store rclass:$rT, aform_addr:$src)]>
+ "stqa\t$rT, $src",
+ LoadStore,
+ [(store rclass:$rT, aform_addr:$src)]>
{ }
multiclass StoreAForms
@@ -226,16 +226,16 @@ multiclass StoreAForms
class StoreXFormVec<ValueType vectype>
: RRForm<0b00100100, (outs), (ins VECREG:$rT, memrr:$src),
- "stqx\t$rT, $src",
- LoadStore,
- [(store (vectype VECREG:$rT), xform_addr:$src)]>
+ "stqx\t$rT, $src",
+ LoadStore,
+ [(store (vectype VECREG:$rT), xform_addr:$src)]>
{ }
class StoreXForm<RegisterClass rclass>
: RRForm<0b00100100, (outs), (ins rclass:$rT, memrr:$src),
- "stqx\t$rT, $src",
- LoadStore,
- [(store rclass:$rT, xform_addr:$src)]>
+ "stqx\t$rT, $src",
+ LoadStore,
+ [(store rclass:$rT, xform_addr:$src)]>
{ }
multiclass StoreXForms
@@ -460,9 +460,9 @@ def IOHLlo:
class FSMBIVec<ValueType vectype>
: RI16Form<0b101001100, (outs VECREG:$rT), (ins u16imm:$val),
- "fsmbi\t$rT, $val",
- SelectOp,
- [(set (vectype VECREG:$rT), (SPUfsmbi immU16:$val))]>
+ "fsmbi\t$rT, $val",
+ SelectOp,
+ [(set (vectype VECREG:$rT), (SPUfsmbi immU16:$val))]>
{ }
multiclass FSMBIs
@@ -1151,7 +1151,7 @@ def ORv16i8_i8:
[/* no pattern */]>;
def : Pat<(v16i8 (SPUpromote_scalar R8C:$rA)),
- (ORv16i8_i8 R8C:$rA, R8C:$rA)>;
+ (ORv16i8_i8 R8C:$rA, R8C:$rA)>;
def ORv8i16_i16:
RRForm<0b10000010000, (outs VECREG:$rT), (ins R16C:$rA, R16C:$rB),
@@ -1200,10 +1200,10 @@ def ORi8_v16i8:
[/* no pattern */]>;
def : Pat<(SPUextract_elt0 (v16i8 VECREG:$rA)),
- (ORi8_v16i8 VECREG:$rA, VECREG:$rA)>;
+ (ORi8_v16i8 VECREG:$rA, VECREG:$rA)>;
def : Pat<(SPUextract_elt0_chained (v16i8 VECREG:$rA)),
- (ORi8_v16i8 VECREG:$rA, VECREG:$rA)>;
+ (ORi8_v16i8 VECREG:$rA, VECREG:$rA)>;
def ORi16_v8i16:
RRForm<0b10000010000, (outs R16C:$rT), (ins VECREG:$rA, VECREG:$rB),
@@ -2311,7 +2311,7 @@ def: Pat<(SPUvec_srl_v8i16 (v8i16 VECREG:$rA), (i16 imm:$val)),
(ROTHMIv8i16 VECREG:$rA, imm:$val)>;
def: Pat<(SPUvec_srl_v8i16 (v8i16 VECREG:$rA), (i8 imm:$val)),
- (ROTHMIv8i16 VECREG:$rA, imm:$val)>;
+ (ROTHMIv8i16 VECREG:$rA, imm:$val)>;
def ROTHMIr16:
RI7Form<0b10111110000, (outs R16C:$rT), (ins R16C:$rA, rothNeg7imm:$val),
diff --git a/lib/Target/CellSPU/SPUNodes.td b/lib/Target/CellSPU/SPUNodes.td
index c231bef..a58a552 100644
--- a/lib/Target/CellSPU/SPUNodes.td
+++ b/lib/Target/CellSPU/SPUNodes.td
@@ -184,10 +184,6 @@ def SPUaform : SDNode<"SPUISD::AFormAddr", SDTIntBinOp, []>;
// Indirect [D-Form "imm($reg)" and X-Form "$reg($reg)"] addresses
def SPUindirect : SDNode<"SPUISD::IndirectAddr", SDTIntBinOp, []>;
-// Load result node
-def SPUload_result : SDTypeProfile<1, 3, []>;
-def SPUldresult : SDNode<"SPUISD::LDRESULT", SPUload_result, [SDNPHasChain]>;
-
// SPU 32-bit sign-extension to 64-bits
def SPUsext32_to_64: SDNode<"SPUISD::SEXT32TO64", SDTIntExtendOp, []>;
diff --git a/lib/Target/CellSPU/SPURegisterInfo.cpp b/lib/Target/CellSPU/SPURegisterInfo.cpp
index 90606cb..3c89c7f 100644
--- a/lib/Target/CellSPU/SPURegisterInfo.cpp
+++ b/lib/Target/CellSPU/SPURegisterInfo.cpp
@@ -289,9 +289,9 @@ SPURegisterInfo::getCalleeSavedRegClasses(const MachineFunction *MF) const
*/
BitVector SPURegisterInfo::getReservedRegs(const MachineFunction &MF) const {
BitVector Reserved(getNumRegs());
- Reserved.set(SPU::R0); // LR
- Reserved.set(SPU::R1); // SP
- Reserved.set(SPU::R2); // environment pointer
+ Reserved.set(SPU::R0); // LR
+ Reserved.set(SPU::R1); // SP
+ Reserved.set(SPU::R2); // environment pointer
return Reserved;
}
@@ -331,7 +331,7 @@ SPURegisterInfo::eliminateCallFramePseudoInstr(MachineFunction &MF,
void
SPURegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II, int SPAdj,
- RegScavenger *RS) const
+ RegScavenger *RS) const
{
unsigned i = 0;
MachineInstr &MI = *II;
@@ -464,10 +464,10 @@ void SPURegisterInfo::emitPrologue(MachineFunction &MF) const
if (isS10Constant(FrameSize)) {
// Spill $sp to adjusted $sp
BuildMI(MBB, MBBI, TII.get(SPU::STQDr32), SPU::R1).addImm(FrameSize)
- .addReg(SPU::R1);
+ .addReg(SPU::R1);
// Adjust $sp by required amout
BuildMI(MBB, MBBI, TII.get(SPU::AIr32), SPU::R1).addReg(SPU::R1)
- .addImm(FrameSize);
+ .addImm(FrameSize);
} else if (FrameSize <= (1 << 16) - 1 && FrameSize >= -(1 << 16)) {
// Frame size can be loaded into ILr32n, so temporarily spill $r2 and use
// $r2 to adjust $sp:
@@ -475,7 +475,7 @@ void SPURegisterInfo::emitPrologue(MachineFunction &MF) const
.addImm(-16)
.addReg(SPU::R1);
BuildMI(MBB, MBBI, TII.get(SPU::ILr32), SPU::R2)
- .addImm(FrameSize);
+ .addImm(FrameSize);
BuildMI(MBB, MBBI, TII.get(SPU::STQDr32), SPU::R1)
.addReg(SPU::R2)
.addReg(SPU::R1);
@@ -504,12 +504,12 @@ void SPURegisterInfo::emitPrologue(MachineFunction &MF) const
// Add callee saved registers to move list.
const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
for (unsigned I = 0, E = CSI.size(); I != E; ++I) {
- int Offset = MFI->getObjectOffset(CSI[I].getFrameIdx());
- unsigned Reg = CSI[I].getReg();
- if (Reg == SPU::R0) continue;
- MachineLocation CSDst(MachineLocation::VirtualFP, Offset);
- MachineLocation CSSrc(Reg);
- Moves.push_back(MachineMove(FrameLabelId, CSDst, CSSrc));
+ int Offset = MFI->getObjectOffset(CSI[I].getFrameIdx());
+ unsigned Reg = CSI[I].getReg();
+ if (Reg == SPU::R0) continue;
+ MachineLocation CSDst(MachineLocation::VirtualFP, Offset);
+ MachineLocation CSSrc(Reg);
+ Moves.push_back(MachineMove(FrameLabelId, CSDst, CSSrc));
}
// Mark effective beginning of when frame pointer is ready.
@@ -556,7 +556,7 @@ SPURegisterInfo::emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const
.addReg(SPU::R1);
BuildMI(MBB, MBBI, TII.get(SPU::AIr32), SPU::R1)
.addReg(SPU::R1)
- .addImm(FrameSize);
+ .addImm(FrameSize);
} else if (FrameSize <= (1 << 16) - 1 && FrameSize >= -(1 << 16)) {
// Frame size can be loaded into ILr32n, so temporarily spill $r2 and use
// $r2 to adjust $sp:
@@ -564,7 +564,7 @@ SPURegisterInfo::emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const
.addImm(16)
.addReg(SPU::R1);
BuildMI(MBB, MBBI, TII.get(SPU::ILr32), SPU::R2)
- .addImm(FrameSize);
+ .addImm(FrameSize);
BuildMI(MBB, MBBI, TII.get(SPU::Ar32), SPU::R1)
.addReg(SPU::R1)
.addReg(SPU::R2);
diff --git a/lib/Target/CellSPU/SPURegisterInfo.h b/lib/Target/CellSPU/SPURegisterInfo.h
index b806e80..aa2f036 100644
--- a/lib/Target/CellSPU/SPURegisterInfo.h
+++ b/lib/Target/CellSPU/SPURegisterInfo.h
@@ -41,7 +41,7 @@ namespace llvm {
static unsigned getRegisterNumbering(unsigned RegEnum);
void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
- unsigned DestReg, const MachineInstr *Orig) const;
+ unsigned DestReg, const MachineInstr *Orig) const;
//! Return the array of callee-saved registers
virtual const unsigned* getCalleeSavedRegs(const MachineFunction *MF) const;
@@ -66,7 +66,7 @@ namespace llvm {
void determineFrameLayout(MachineFunction &MF) const;
void processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
- RegScavenger *RS = NULL) const;
+ RegScavenger *RS = NULL) const;
//! Emit the function prologue
void emitPrologue(MachineFunction &MF) const;
//! Emit the function epilogue
diff --git a/lib/Target/CellSPU/SPUSubtarget.h b/lib/Target/CellSPU/SPUSubtarget.h
index 59196fa..2ee7bb8 100644
--- a/lib/Target/CellSPU/SPUSubtarget.h
+++ b/lib/Target/CellSPU/SPUSubtarget.h
@@ -84,7 +84,7 @@ namespace llvm {
/// properties of this subtarget.
const char *getTargetDataString() const {
return "E-p:32:32:128-f64:64:128-f32:32:128-i64:32:128-i32:32:128"
- "-i16:16:128-i8:8:128-i1:8:128-a:0:128-v128:128:128"
+ "-i16:16:128-i8:8:128-i1:8:128-a:0:128-v128:128:128"
"-s:128:128";
}
};
diff --git a/lib/Target/CellSPU/SPUTargetMachine.cpp b/lib/Target/CellSPU/SPUTargetMachine.cpp
index 47f785a..aca949a 100644
--- a/lib/Target/CellSPU/SPUTargetMachine.cpp
+++ b/lib/Target/CellSPU/SPUTargetMachine.cpp
@@ -50,7 +50,7 @@ SPUTargetMachine::getModuleMatchQuality(const Module &M)
|| (TT.size() >= 8 && std::string(TT.begin(), TT.begin()+8) == "cellspu-"))
return 20;
- return 0; // No match at all...
+ return 0; // No match at all...
}
SPUTargetMachine::SPUTargetMachine(const Module &M, const std::string &FS)