Diffstat (limited to 'lib/Target/R600')
82 files changed, 3441 insertions, 4285 deletions
diff --git a/lib/Target/R600/AMDGPU.h b/lib/Target/R600/AMDGPU.h index 9792bd8..f284291 100644 --- a/lib/Target/R600/AMDGPU.h +++ b/lib/Target/R600/AMDGPU.h @@ -11,21 +11,28 @@ #ifndef AMDGPU_H #define AMDGPU_H -#include "AMDGPUTargetMachine.h" #include "llvm/Support/TargetRegistry.h" #include "llvm/Target/TargetMachine.h" namespace llvm { -class FunctionPass; +class AMDGPUInstrPrinter; class AMDGPUTargetMachine; +class FunctionPass; +class MCAsmInfo; +class raw_ostream; +class Target; +class TargetMachine; // R600 Passes -FunctionPass* createR600KernelParametersPass(const DataLayout *TD); +FunctionPass *createR600VectorRegMerger(TargetMachine &tm); +FunctionPass *createR600TextureIntrinsicsReplacer(); FunctionPass *createR600ExpandSpecialInstrsPass(TargetMachine &tm); FunctionPass *createR600EmitClauseMarkers(TargetMachine &tm); FunctionPass *createR600Packetizer(TargetMachine &tm); FunctionPass *createR600ControlFlowFinalizer(TargetMachine &tm); +FunctionPass *createAMDGPUCFGPreparationPass(TargetMachine &tm); +FunctionPass *createAMDGPUCFGStructurizerPass(TargetMachine &tm); // SI Passes FunctionPass *createSIAnnotateControlFlowPass(); @@ -36,7 +43,10 @@ FunctionPass *createSIInsertWaits(TargetMachine &tm); // Passes common to R600 and SI Pass *createAMDGPUStructurizeCFGPass(); FunctionPass *createAMDGPUConvertToISAPass(TargetMachine &tm); -FunctionPass* createAMDGPUIndirectAddressingPass(TargetMachine &tm); +FunctionPass *createAMDGPUIndirectAddressingPass(TargetMachine &tm); +FunctionPass *createAMDGPUISelDag(TargetMachine &tm); + +extern Target TheAMDGPUTarget; } // End namespace llvm @@ -49,4 +59,41 @@ namespace ShaderType { }; } +/// OpenCL uses address spaces to differentiate between +/// various memory regions on the hardware. On the CPU +/// all of the address spaces point to the same memory, +/// however on the GPU, each address space points to +/// a separate piece of memory that is unique from other +/// memory locations. +namespace AMDGPUAS { +enum AddressSpaces { + PRIVATE_ADDRESS = 0, ///< Address space for private memory. + GLOBAL_ADDRESS = 1, ///< Address space for global memory (RAT0, VTX0). + CONSTANT_ADDRESS = 2, ///< Address space for constant memory. + LOCAL_ADDRESS = 3, ///< Address space for local memory. + REGION_ADDRESS = 4, ///< Address space for region memory. + ADDRESS_NONE = 5, ///< Address space for unknown memory.
+ PARAM_D_ADDRESS = 6, ///< Address space for direct addressable parameter memory (CONST0) + PARAM_I_ADDRESS = 7, ///< Address space for indirect addressable parameter memory (VTX1) + CONSTANT_BUFFER_0 = 8, + CONSTANT_BUFFER_1 = 9, + CONSTANT_BUFFER_2 = 10, + CONSTANT_BUFFER_3 = 11, + CONSTANT_BUFFER_4 = 12, + CONSTANT_BUFFER_5 = 13, + CONSTANT_BUFFER_6 = 14, + CONSTANT_BUFFER_7 = 15, + CONSTANT_BUFFER_8 = 16, + CONSTANT_BUFFER_9 = 17, + CONSTANT_BUFFER_10 = 18, + CONSTANT_BUFFER_11 = 19, + CONSTANT_BUFFER_12 = 20, + CONSTANT_BUFFER_13 = 21, + CONSTANT_BUFFER_14 = 22, + CONSTANT_BUFFER_15 = 23, + LAST_ADDRESS = 24 +}; + +} // namespace AMDGPUAS + #endif // AMDGPU_H diff --git a/lib/Target/R600/AMDGPU.td b/lib/Target/R600/AMDGPU.td index 1a26c77..0048e25 100644 --- a/lib/Target/R600/AMDGPU.td +++ b/lib/Target/R600/AMDGPU.td @@ -10,6 +10,79 @@ // Include AMDIL TD files include "AMDILBase.td" +//===----------------------------------------------------------------------===// +// Subtarget Features +//===----------------------------------------------------------------------===// + +// Debugging Features + +def FeatureDumpCode : SubtargetFeature <"DumpCode", + "DumpCode", + "true", + "Dump MachineInstrs in the CodeEmitter">; + +// Target features + +def FeatureFP64 : SubtargetFeature<"fp64", + "FP64", + "true", + "Enable 64bit double precision operations">; + +def Feature64BitPtr : SubtargetFeature<"64BitPtr", + "Is64bit", + "true", + "Specify if 64bit addressing should be used.">; + +def Feature32on64BitPtr : SubtargetFeature<"64on32BitPtr", + "Is32on64bit", + "false", + "Specify if 64bit sized pointers with 32bit addressing should be used.">; + +def FeatureR600ALUInst : SubtargetFeature<"R600ALUInst", + "R600ALUInst", + "false", + "Older version of ALU instructions encoding.">; + +def FeatureVertexCache : SubtargetFeature<"HasVertexCache", + "HasVertexCache", + "true", + "Specify use of dedicated vertex cache.">; + +def FeatureCaymanISA : SubtargetFeature<"caymanISA", + "CaymanISA", + "true", + "Use Cayman ISA">; + +class SubtargetFeatureFetchLimit <string Value> : + SubtargetFeature <"fetch"#Value, + "TexVTXClauseSize", + Value, + "Limit the maximum number of fetches in a clause to "#Value>; + +def FeatureFetchLimit8 : SubtargetFeatureFetchLimit <"8">; +def FeatureFetchLimit16 : SubtargetFeatureFetchLimit <"16">; + +class SubtargetFeatureGeneration <string Value, + list<SubtargetFeature> Implies> : + SubtargetFeature <Value, "Gen", "AMDGPUSubtarget::"#Value, + Value#" GPU generation", Implies>; + +def FeatureR600 : SubtargetFeatureGeneration<"R600", + [FeatureR600ALUInst, FeatureFetchLimit8]>; + +def FeatureR700 : SubtargetFeatureGeneration<"R700", + [FeatureFetchLimit16]>; + +def FeatureEvergreen : SubtargetFeatureGeneration<"EVERGREEN", + [FeatureFetchLimit16]>; + +def FeatureNorthernIslands : SubtargetFeatureGeneration<"NORTHERN_ISLANDS", + [FeatureFetchLimit16]>; + +def FeatureSouthernIslands : SubtargetFeatureGeneration<"SOUTHERN_ISLANDS", + [Feature64BitPtr, FeatureFP64]>; + +//===----------------------------------------------------------------------===// def AMDGPUInstrInfo : InstrInfo { let guessInstructionProperties = 1; diff --git a/lib/Target/R600/AMDGPUAsmPrinter.cpp b/lib/Target/R600/AMDGPUAsmPrinter.cpp index c915f50..f720c7e 100644 --- a/lib/Target/R600/AMDGPUAsmPrinter.cpp +++ b/lib/Target/R600/AMDGPUAsmPrinter.cpp @@ -19,11 +19,12 @@ #include "AMDGPUAsmPrinter.h" #include "AMDGPU.h" +#include "R600Defines.h" +#include "R600MachineFunctionInfo.h" +#include "R600RegisterInfo.h"
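The AMDGPUAS values above are plain address-space numbers carried on IR pointers, so backend code can classify memory operations with ordinary integer comparisons. A minimal sketch of that usage (illustrative only, not code from this commit; the helper name is invented), assuming AMDGPU.h is on the include path:

```cpp
// Minimal sketch: classify a load against the AMDGPUAS values defined above.
#include "AMDGPU.h"
#include "llvm/IR/Instructions.h"

// True if the load reads kernel-parameter or constant-buffer memory.
static bool isParamOrConstantBufferLoad(const llvm::LoadInst &LI) {
  unsigned AS = LI.getPointerAddressSpace();
  return AS == AMDGPUAS::PARAM_D_ADDRESS ||
         AS == AMDGPUAS::PARAM_I_ADDRESS ||
         (AS >= AMDGPUAS::CONSTANT_BUFFER_0 &&
          AS <= AMDGPUAS::CONSTANT_BUFFER_15);
}
```

The range comparison works because CONSTANT_BUFFER_0 through CONSTANT_BUFFER_15 occupy the contiguous values 8 through 23.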
#include "SIDefines.h" #include "SIMachineFunctionInfo.h" #include "SIRegisterInfo.h" -#include "R600MachineFunctionInfo.h" -#include "R600RegisterInfo.h" #include "llvm/MC/MCContext.h" #include "llvm/MC/MCSectionELF.h" #include "llvm/MC/MCStreamer.h" @@ -62,7 +63,7 @@ bool AMDGPUAsmPrinter::runOnMachineFunction(MachineFunction &MF) { ELF::SHT_PROGBITS, 0, SectionKind::getReadOnly()); OutStreamer.SwitchSection(ConfigSection); - if (STM.device()->getGeneration() > AMDGPUDeviceInfo::HD6XXX) { + if (STM.getGeneration() > AMDGPUSubtarget::NORTHERN_ISLANDS) { EmitProgramInfoSI(MF); } else { EmitProgramInfoR600(MF); @@ -78,6 +79,7 @@ void AMDGPUAsmPrinter::EmitProgramInfoR600(MachineFunction &MF) { const R600RegisterInfo * RI = static_cast<const R600RegisterInfo*>(TM.getRegisterInfo()); R600MachineFunctionInfo *MFI = MF.getInfo<R600MachineFunctionInfo>(); + const AMDGPUSubtarget &STM = TM.getSubtarget<AMDGPUSubtarget>(); for (MachineFunction::iterator BB = MF.begin(), BB_E = MF.end(); BB != BB_E; ++BB) { @@ -101,9 +103,33 @@ void AMDGPUAsmPrinter::EmitProgramInfoR600(MachineFunction &MF) { } } } - OutStreamer.EmitIntValue(MaxGPR + 1, 4); - OutStreamer.EmitIntValue(MFI->StackSize, 4); - OutStreamer.EmitIntValue(killPixel, 4); + + unsigned RsrcReg; + if (STM.getGeneration() >= AMDGPUSubtarget::EVERGREEN) { + // Evergreen / Northern Islands + switch (MFI->ShaderType) { + default: // Fall through + case ShaderType::COMPUTE: RsrcReg = R_0288D4_SQ_PGM_RESOURCES_LS; break; + case ShaderType::GEOMETRY: RsrcReg = R_028878_SQ_PGM_RESOURCES_GS; break; + case ShaderType::PIXEL: RsrcReg = R_028844_SQ_PGM_RESOURCES_PS; break; + case ShaderType::VERTEX: RsrcReg = R_028860_SQ_PGM_RESOURCES_VS; break; + } + } else { + // R600 / R700 + switch (MFI->ShaderType) { + default: // Fall through + case ShaderType::GEOMETRY: // Fall through + case ShaderType::COMPUTE: // Fall through + case ShaderType::VERTEX: RsrcReg = R_028868_SQ_PGM_RESOURCES_VS; break; + case ShaderType::PIXEL: RsrcReg = R_028850_SQ_PGM_RESOURCES_PS; break; + } + } + + OutStreamer.EmitIntValue(RsrcReg, 4); + OutStreamer.EmitIntValue(S_NUM_GPRS(MaxGPR + 1) | + S_STACK_SIZE(MFI->StackSize), 4); + OutStreamer.EmitIntValue(R_02880C_DB_SHADER_CONTROL, 4); + OutStreamer.EmitIntValue(S_02880C_KILL_ENABLE(killPixel), 4); } void AMDGPUAsmPrinter::EmitProgramInfoSI(MachineFunction &MF) { diff --git a/lib/Target/R600/AMDGPUCallingConv.td b/lib/Target/R600/AMDGPUCallingConv.td index 9c30515..84e4f3a 100644 --- a/lib/Target/R600/AMDGPUCallingConv.td +++ b/lib/Target/R600/AMDGPUCallingConv.td @@ -32,17 +32,21 @@ def CC_SI : CallingConv<[ VGPR8, VGPR9, VGPR10, VGPR11, VGPR12, VGPR13, VGPR14, VGPR15, VGPR16, VGPR17, VGPR18, VGPR19, VGPR20, VGPR21, VGPR22, VGPR23, VGPR24, VGPR25, VGPR26, VGPR27, VGPR28, VGPR29, VGPR30, VGPR31 - ]>>>, + ]>>> + +]>; - // This is the default for i64 values. - // XXX: We should change this once clang understands the CC_AMDGPU. 
- CCIfType<[i64], CCAssignToRegWithShadow< - [ SGPR0, SGPR2, SGPR4, SGPR6, SGPR8, SGPR10, SGPR12, SGPR14 ], - [ SGPR1, SGPR3, SGPR5, SGPR7, SGPR9, SGPR11, SGPR13, SGPR15 ] - >> +// Calling convention for SI compute kernels +def CC_SI_Kernel : CallingConv<[ + CCIfType<[i64], CCAssignToStack <8, 4>>, + CCIfType<[i32, f32], CCAssignToStack <4, 4>>, + CCIfType<[i16], CCAssignToStack <2, 4>>, + CCIfType<[i8], CCAssignToStack <1, 4>> ]>; def CC_AMDGPU : CallingConv<[ - CCIf<"State.getTarget().getSubtarget<AMDGPUSubtarget>().device()"# - "->getGeneration() == AMDGPUDeviceInfo::HD7XXX", CCDelegateTo<CC_SI>> + CCIf<"State.getMachineFunction().getInfo<SIMachineFunctionInfo>()->"# + "ShaderType == ShaderType::COMPUTE", CCDelegateTo<CC_SI_Kernel>>, + CCIf<"State.getTarget().getSubtarget<AMDGPUSubtarget>()"# + ".getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS", CCDelegateTo<CC_SI>> ]>; diff --git a/lib/Target/R600/AMDGPUFrameLowering.cpp b/lib/Target/R600/AMDGPUFrameLowering.cpp index 815d6f7..40f14d2 100644 --- a/lib/Target/R600/AMDGPUFrameLowering.cpp +++ b/lib/Target/R600/AMDGPUFrameLowering.cpp @@ -78,27 +78,8 @@ int AMDGPUFrameLowering::getFrameIndexOffset(const MachineFunction &MF, int UpperBound = FI == -1 ? MFI->getNumObjects() : FI; for (int i = MFI->getObjectIndexBegin(); i < UpperBound; ++i) { - const AllocaInst *Alloca = MFI->getObjectAllocation(i); - unsigned ArrayElements; - const Type *AllocaType = Alloca->getAllocatedType(); - const Type *ElementType; - - if (AllocaType->isArrayTy()) { - ArrayElements = AllocaType->getArrayNumElements(); - ElementType = AllocaType->getArrayElementType(); - } else { - ArrayElements = 1; - ElementType = AllocaType; - } - - unsigned VectorElements; - if (ElementType->isVectorTy()) { - VectorElements = ElementType->getVectorNumElements(); - } else { - VectorElements = 1; - } - - Offset += (VectorElements / getStackWidth(MF)) * ArrayElements; + unsigned Size = MFI->getObjectSize(i); + Offset += (Size / (getStackWidth(MF) * 4)); } return Offset; } diff --git a/lib/Target/R600/AMDGPUISelLowering.cpp b/lib/Target/R600/AMDGPUISelLowering.cpp index a266df5..02d6fab 100644 --- a/lib/Target/R600/AMDGPUISelLowering.cpp +++ b/lib/Target/R600/AMDGPUISelLowering.cpp @@ -14,9 +14,11 @@ //===----------------------------------------------------------------------===// #include "AMDGPUISelLowering.h" +#include "AMDGPU.h" #include "AMDGPURegisterInfo.h" -#include "AMDILIntrinsicInfo.h" #include "AMDGPUSubtarget.h" +#include "AMDILIntrinsicInfo.h" +#include "SIMachineFunctionInfo.h" #include "llvm/CodeGen/CallingConvLower.h" #include "llvm/CodeGen/MachineFunction.h" #include "llvm/CodeGen/MachineRegisterInfo.h" @@ -46,6 +48,9 @@ AMDGPUTargetLowering::AMDGPUTargetLowering(TargetMachine &TM) : setOperationAction(ISD::FFLOOR, MVT::f32, Legal); setOperationAction(ISD::FRINT, MVT::f32, Legal); + // The hardware supports ROTR, but not ROTL + setOperationAction(ISD::ROTL, MVT::i32, Expand); + // Lower floating point store/load to integer store/load to reduce the number // of patterns in tablegen. 
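Marking ISD::ROTL as Expand is safe because the right rotate the hardware does have can synthesize the missing left rotate. A standalone check of the identity the legalizer can fall back on (plain C++, not backend code):

```cpp
#include <cassert>
#include <cstdint>

// rotr is what the hardware provides; rotl(x, n) == rotr(x, (32 - n) & 31).
static uint32_t rotr32(uint32_t X, unsigned N) {
  N &= 31;
  return (X >> N) | (X << ((32 - N) & 31));
}
static uint32_t rotl32(uint32_t X, unsigned N) {
  return rotr32(X, (32 - N) & 31);
}

int main() {
  assert(rotl32(0x80000001u, 1) == 0x00000003u); // top bit wraps to bit 0
  return 0;
}
```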
setOperationAction(ISD::STORE, MVT::f32, Promote); @@ -83,7 +88,7 @@ SDValue AMDGPUTargetLowering::LowerReturn( bool isVarArg, const SmallVectorImpl<ISD::OutputArg> &Outs, const SmallVectorImpl<SDValue> &OutVals, - DebugLoc DL, SelectionDAG &DAG) const { + SDLoc DL, SelectionDAG &DAG) const { return DAG.getNode(AMDGPUISD::RET_FLAG, DL, MVT::Other, Chain); } @@ -114,7 +119,7 @@ SDValue AMDGPUTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) SDValue AMDGPUTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const { unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); EVT VT = Op.getValueType(); switch (IntrinsicID) { @@ -154,7 +159,7 @@ SDValue AMDGPUTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SDValue AMDGPUTargetLowering::LowerIntrinsicIABS(SDValue Op, SelectionDAG &DAG) const { - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); EVT VT = Op.getValueType(); SDValue Neg = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, VT), Op.getOperand(1)); @@ -166,7 +171,7 @@ SDValue AMDGPUTargetLowering::LowerIntrinsicIABS(SDValue Op, /// LRP(a, b, c) = muladd(a, b, (1 - a) * c) SDValue AMDGPUTargetLowering::LowerIntrinsicLRP(SDValue Op, SelectionDAG &DAG) const { - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); EVT VT = Op.getValueType(); SDValue OneSubA = DAG.getNode(ISD::FSUB, DL, VT, DAG.getConstantFP(1.0f, MVT::f32), @@ -181,7 +186,7 @@ SDValue AMDGPUTargetLowering::LowerIntrinsicLRP(SDValue Op, /// \brief Generate Min/Max node SDValue AMDGPUTargetLowering::LowerMinMax(SDValue Op, SelectionDAG &DAG) const { - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); EVT VT = Op.getValueType(); SDValue LHS = Op.getOperand(0); @@ -242,7 +247,7 @@ SDValue AMDGPUTargetLowering::LowerMinMax(SDValue Op, SDValue AMDGPUTargetLowering::LowerUDIVREM(SDValue Op, SelectionDAG &DAG) const { - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); EVT VT = Op.getValueType(); SDValue Num = Op.getOperand(0); diff --git a/lib/Target/R600/AMDGPUISelLowering.h b/lib/Target/R600/AMDGPUISelLowering.h index c2a79ea..69a0ac9 100644 --- a/lib/Target/R600/AMDGPUISelLowering.h +++ b/lib/Target/R600/AMDGPUISelLowering.h @@ -33,8 +33,9 @@ protected: /// MachineFunction. /// /// \returns a RegisterSDNode representing Reg. 
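The LRP comment in the hunk above fixes the semantics being lowered: LRP(a, b, c) = a*b + (1 - a)*c, a linear interpolation that yields c at a = 0 and b at a = 1. A plain C++ reference of that formula (illustrative, not backend code):

```cpp
#include <cmath>

// LRP(a, b, c) = muladd(a, b, (1 - a) * c)
static float lrp(float A, float B, float C) {
  return std::fma(A, B, (1.0f - A) * C);
}
// lrp(0.25f, 10.0f, 2.0f) == 0.25f*10.0f + 0.75f*2.0f == 4.0f
```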
- SDValue CreateLiveInRegister(SelectionDAG &DAG, const TargetRegisterClass *RC, - unsigned Reg, EVT VT) const; + virtual SDValue CreateLiveInRegister(SelectionDAG &DAG, + const TargetRegisterClass *RC, + unsigned Reg, EVT VT) const; bool isHWTrueValue(SDValue Op) const; bool isHWFalseValue(SDValue Op) const; @@ -49,7 +50,7 @@ public: bool isVarArg, const SmallVectorImpl<ISD::OutputArg> &Outs, const SmallVectorImpl<SDValue> &OutVals, - DebugLoc DL, SelectionDAG &DAG) const; + SDLoc DL, SelectionDAG &DAG) const; virtual SDValue LowerCall(CallLoweringInfo &CLI, SmallVectorImpl<SDValue> &InVals) const { CLI.Callee.dump(); @@ -115,8 +116,6 @@ enum { RET_FLAG, BRANCH_COND, // End AMDIL ISD Opcodes - BITALIGN, - BUFFER_STORE, DWORDADDR, FRACT, FMAX, @@ -126,6 +125,8 @@ enum { SMIN, UMIN, URECIP, + DOT4, + TEXTURE_FETCH, EXPORT, CONST_ADDRESS, REGISTER_LOAD, diff --git a/lib/Target/R600/AMDGPUIndirectAddressing.cpp b/lib/Target/R600/AMDGPUIndirectAddressing.cpp index ed6c8ec..3ce3ecf 100644 --- a/lib/Target/R600/AMDGPUIndirectAddressing.cpp +++ b/lib/Target/R600/AMDGPUIndirectAddressing.cpp @@ -39,7 +39,7 @@ private: public: AMDGPUIndirectAddressingPass(TargetMachine &tm) : MachineFunctionPass(ID), - TII(static_cast<const AMDGPUInstrInfo*>(tm.getInstrInfo())) + TII(0) { } virtual bool runOnMachineFunction(MachineFunction &MF); @@ -59,6 +59,8 @@ FunctionPass *llvm::createAMDGPUIndirectAddressingPass(TargetMachine &tm) { bool AMDGPUIndirectAddressingPass::runOnMachineFunction(MachineFunction &MF) { MachineRegisterInfo &MRI = MF.getRegInfo(); + TII = static_cast<const AMDGPUInstrInfo*>(MF.getTarget().getInstrInfo()); + int IndirectBegin = TII->getIndirectIndexBegin(MF); int IndirectEnd = TII->getIndirectIndexEnd(MF); @@ -224,7 +226,7 @@ bool AMDGPUIndirectAddressingPass::runOnMachineFunction(MachineFunction &MF) { unsigned LiveAddress = RegisterAddressMap[Reg]; // Chain the live-ins if (LiveAddressRegisterMap.find(LiveAddress) != - RegisterAddressMap.end()) { + LiveAddressRegisterMap.end()) { MI.addOperand(MachineOperand::CreateReg( LiveAddressRegisterMap[LiveAddress], false, // isDef diff --git a/lib/Target/R600/AMDGPUInstrInfo.cpp b/lib/Target/R600/AMDGPUInstrInfo.cpp index 30f736c..31b3002 100644 --- a/lib/Target/R600/AMDGPUInstrInfo.cpp +++ b/lib/Target/R600/AMDGPUInstrInfo.cpp @@ -16,7 +16,6 @@ #include "AMDGPUInstrInfo.h" #include "AMDGPURegisterInfo.h" #include "AMDGPUTargetMachine.h" -#include "AMDIL.h" #include "llvm/CodeGen/MachineFrameInfo.h" #include "llvm/CodeGen/MachineInstrBuilder.h" #include "llvm/CodeGen/MachineRegisterInfo.h" @@ -28,7 +27,7 @@ using namespace llvm; AMDGPUInstrInfo::AMDGPUInstrInfo(TargetMachine &tm) - : AMDGPUGenInstrInfo(0,0), RI(tm, *this), TM(tm) { } + : AMDGPUGenInstrInfo(0,0), RI(tm), TM(tm) { } const AMDGPURegisterInfo &AMDGPUInstrInfo::getRegisterInfo() const { return RI; @@ -99,27 +98,6 @@ bool AMDGPUInstrInfo::getNextBranchInstr(MachineBasicBlock::iterator &iter, return false; } -MachineBasicBlock::iterator skipFlowControl(MachineBasicBlock *MBB) { - MachineBasicBlock::iterator tmp = MBB->end(); - if (!MBB->size()) { - return MBB->end(); - } - while (--tmp) { - if (tmp->getOpcode() == AMDGPU::ENDLOOP - || tmp->getOpcode() == AMDGPU::ENDIF - || tmp->getOpcode() == AMDGPU::ELSE) { - if (tmp == MBB->begin()) { - return tmp; - } else { - continue; - } - } else { - return ++tmp; - } - } - return MBB->end(); -} - void AMDGPUInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, diff --git a/lib/Target/R600/AMDGPUInstrInfo.td 
b/lib/Target/R600/AMDGPUInstrInfo.td index b66ae87..48d89dd 100644 --- a/lib/Target/R600/AMDGPUInstrInfo.td +++ b/lib/Target/R600/AMDGPUInstrInfo.td @@ -23,12 +23,6 @@ def AMDGPUDTIntTernaryOp : SDTypeProfile<1, 3, [ // AMDGPU DAG Nodes // -// out = ((a << 32) | b) >> c) -// -// Can be used to optimize rtol: -// rotl(a, b) = bitalign(a, a, 32 - b) -def AMDGPUbitalign : SDNode<"AMDGPUISD::BITALIGN", AMDGPUDTIntTernaryOp>; - // This argument to this node is a dword address. def AMDGPUdwordaddr : SDNode<"AMDGPUISD::DWORDADDR", SDTIntUnaryOp>; @@ -71,8 +65,6 @@ def AMDGPUumin : SDNode<"AMDGPUISD::UMIN", SDTIntBinOp, // e is rounding error def AMDGPUurecip : SDNode<"AMDGPUISD::URECIP", SDTIntUnaryOp>; -def fpow : SDNode<"ISD::FPOW", SDTFPBinOp>; - def AMDGPUregister_load : SDNode<"AMDGPUISD::REGISTER_LOAD", SDTypeProfile<1, 2, [SDTCisPtrTy<1>, SDTCisInt<2>]>, [SDNPHasChain, SDNPMayLoad]>; diff --git a/lib/Target/R600/AMDGPUInstructions.td b/lib/Target/R600/AMDGPUInstructions.td index 83e1359..29df374 100644 --- a/lib/Target/R600/AMDGPUInstructions.td +++ b/lib/Target/R600/AMDGPUInstructions.td @@ -90,6 +90,10 @@ def zextloadi8_global : PatFrag<(ops node:$ptr), (zextloadi8 node:$ptr), [{ return isGlobalLoad(dyn_cast<LoadSDNode>(N)); }]>; +def zextloadi8_constant : PatFrag<(ops node:$ptr), (zextloadi8 node:$ptr), [{ + return isGlobalLoad(dyn_cast<LoadSDNode>(N)); +}]>; + class Constants { int TWO_PI = 0x40c90fdb; int PI = 0x40490fdb; @@ -276,6 +280,31 @@ multiclass BFIPatterns <Instruction BFI_INT> { } +// SHA-256 Ma patterns + +// ((x & z) | (y & (x | z))) -> BFI_INT (XOR x, y), z, y +class SHA256MaPattern <Instruction BFI_INT, Instruction XOR> : Pat < + (or (and i32:$x, i32:$z), (and i32:$y, (or i32:$x, i32:$z))), + (BFI_INT (XOR i32:$x, i32:$y), i32:$z, i32:$y) +>; + +// Bitfield extract patterns + +def legalshift32 : ImmLeaf <i32, [{return Imm >=0 && Imm < 32;}]>; +def bfemask : PatLeaf <(imm), [{return isMask_32(N->getZExtValue());}], + SDNodeXForm<imm, [{ return CurDAG->getTargetConstant(CountTrailingOnes_32(N->getZExtValue()), MVT::i32);}]>>; + +class BFEPattern <Instruction BFE> : Pat < + (and (srl i32:$x, legalshift32:$y), bfemask:$z), + (BFE $x, $y, $z) +>; + +// rotr pattern +class ROTRPattern <Instruction BIT_ALIGN> : Pat < + (rotr i32:$src0, i32:$src1), + (BIT_ALIGN $src0, $src0, $src1) +>; + include "R600Instructions.td" include "SIInstrInfo.td" diff --git a/lib/Target/R600/AMDGPURegisterInfo.cpp b/lib/Target/R600/AMDGPURegisterInfo.cpp index fe994d2..3402092 100644 --- a/lib/Target/R600/AMDGPURegisterInfo.cpp +++ b/lib/Target/R600/AMDGPURegisterInfo.cpp @@ -17,11 +17,9 @@ using namespace llvm; -AMDGPURegisterInfo::AMDGPURegisterInfo(TargetMachine &tm, - const TargetInstrInfo &tii) +AMDGPURegisterInfo::AMDGPURegisterInfo(TargetMachine &tm) : AMDGPUGenRegisterInfo(0), - TM(tm), - TII(tii) + TM(tm) { } //===----------------------------------------------------------------------===// diff --git a/lib/Target/R600/AMDGPURegisterInfo.h b/lib/Target/R600/AMDGPURegisterInfo.h index 1fc88e7..7cbd34b 100644 --- a/lib/Target/R600/AMDGPURegisterInfo.h +++ b/lib/Target/R600/AMDGPURegisterInfo.h @@ -30,10 +30,9 @@ class TargetInstrInfo; struct AMDGPURegisterInfo : public AMDGPUGenRegisterInfo { TargetMachine &TM; - const TargetInstrInfo &TII; static const uint16_t CalleeSavedReg; - AMDGPURegisterInfo(TargetMachine &tm, const TargetInstrInfo &tii); + AMDGPURegisterInfo(TargetMachine &tm); virtual BitVector getReservedRegs(const MachineFunction &MF) const { assert(!"Unimplemented"); return 
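The BFEPattern above folds a shift-and-mask pair into one bitfield extract: bfemask only matches masks that are a run of low ones (isMask_32) and converts the mask into a field width via CountTrailingOnes_32. A standalone model of the computation being matched (an illustration, not the instruction definition):

```cpp
#include <cassert>
#include <cstdint>

// BFE(x, shift, width) == (x >> shift) & ((1 << width) - 1)
static uint32_t bfe_u32(uint32_t X, unsigned Shift, unsigned Width) {
  uint32_t Mask = Width < 32 ? (1u << Width) - 1u : ~0u;
  return (X >> Shift) & Mask;
}

int main() {
  // (0xABCD1234 >> 8) & 0xFFF extracts a 12-bit field starting at bit 8.
  assert(bfe_u32(0xABCD1234u, 8, 12) == 0xD12u);
  return 0;
}
```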
BitVector(); diff --git a/lib/Target/R600/AMDGPURegisterInfo.td b/lib/Target/R600/AMDGPURegisterInfo.td index b5aca03..835a146 100644 --- a/lib/Target/R600/AMDGPURegisterInfo.td +++ b/lib/Target/R600/AMDGPURegisterInfo.td @@ -14,7 +14,8 @@ let Namespace = "AMDGPU" in { foreach Index = 0-15 in { - def sub#Index : SubRegIndex; + // Indices are used in a variety of ways here, so don't set a size/offset. + def sub#Index : SubRegIndex<-1, -1>; } def INDIRECT_BASE_ADDR : Register <"INDIRECT_BASE_ADDR">; diff --git a/lib/Target/R600/AMDGPUStructurizeCFG.cpp b/lib/Target/R600/AMDGPUStructurizeCFG.cpp index dea43b8..d26783d 100644 --- a/lib/Target/R600/AMDGPUStructurizeCFG.cpp +++ b/lib/Target/R600/AMDGPUStructurizeCFG.cpp @@ -16,14 +16,14 @@ //===----------------------------------------------------------------------===// #include "AMDGPU.h" -#include "llvm/ADT/SCCIterator.h" #include "llvm/ADT/MapVector.h" +#include "llvm/ADT/SCCIterator.h" #include "llvm/Analysis/RegionInfo.h" #include "llvm/Analysis/RegionIterator.h" #include "llvm/Analysis/RegionPass.h" #include "llvm/IR/Module.h" -#include "llvm/Transforms/Utils/SSAUpdater.h" #include "llvm/Support/PatternMatch.h" +#include "llvm/Transforms/Utils/SSAUpdater.h" using namespace llvm; using namespace llvm::PatternMatch; @@ -353,7 +353,7 @@ Value *AMDGPUStructurizeCFG::buildCondition(BranchInst *Term, unsigned Idx, if (Term->isConditional()) { Cond = Term->getCondition(); - if (Idx != Invert) + if (Idx != (unsigned)Invert) Cond = invert(Cond); } return Cond; diff --git a/lib/Target/R600/AMDGPUSubtarget.cpp b/lib/Target/R600/AMDGPUSubtarget.cpp index a7e1d7b..8ed5a74 100644 --- a/lib/Target/R600/AMDGPUSubtarget.cpp +++ b/lib/Target/R600/AMDGPUSubtarget.cpp @@ -13,6 +13,7 @@ //===----------------------------------------------------------------------===// #include "AMDGPUSubtarget.h" +#include <stdio.h> using namespace llvm; @@ -25,8 +26,6 @@ AMDGPUSubtarget::AMDGPUSubtarget(StringRef TT, StringRef CPU, StringRef FS) : AMDGPUGenSubtargetInfo(TT, CPU, FS), DumpCode(false) { InstrItins = getInstrItineraryForCPU(CPU); - memset(CapsOverride, 0, sizeof(*CapsOverride) - * AMDGPUDeviceInfo::MaxNumberCapabilities); // Default card StringRef GPU = CPU; Is64bit = false; @@ -34,22 +33,15 @@ AMDGPUSubtarget::AMDGPUSubtarget(StringRef TT, StringRef CPU, StringRef FS) : DefaultSize[1] = 1; DefaultSize[2] = 1; HasVertexCache = false; + TexVTXClauseSize = 0; + Gen = AMDGPUSubtarget::R600; + FP64 = false; + CaymanISA = false; ParseSubtargetFeatures(GPU, FS); DevName = GPU; - Device = AMDGPUDeviceInfo::getDeviceFromName(DevName, this, Is64bit); -} - -AMDGPUSubtarget::~AMDGPUSubtarget() { - delete Device; } bool -AMDGPUSubtarget::isOverride(AMDGPUDeviceInfo::Caps caps) const { - assert(caps < AMDGPUDeviceInfo::MaxNumberCapabilities && - "Caps index is out of bounds!"); - return CapsOverride[caps]; -} -bool AMDGPUSubtarget::is64bit() const { return Is64bit; } @@ -57,6 +49,22 @@ bool AMDGPUSubtarget::hasVertexCache() const { return HasVertexCache; } +short +AMDGPUSubtarget::getTexVTXClauseSize() const { + return TexVTXClauseSize; +} +enum AMDGPUSubtarget::Generation +AMDGPUSubtarget::getGeneration() const { + return Gen; +} +bool +AMDGPUSubtarget::hasHWFP64() const { + return FP64; +} +bool +AMDGPUSubtarget::hasCaymanISA() const { + return CaymanISA; +} bool AMDGPUSubtarget::isTargetELF() const { return false; @@ -72,21 +80,28 @@ AMDGPUSubtarget::getDefaultSize(uint32_t dim) const { std::string AMDGPUSubtarget::getDataLayout() const { - if (!Device) { - return 
std::string("e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16" - "-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f80:32:32" - "-v16:16:16-v24:32:32-v32:32:32-v48:64:64-v64:64:64" - "-v96:128:128-v128:128:128-v192:256:256-v256:256:256" - "-v512:512:512-v1024:1024:1024-v2048:2048:2048-a0:0:64"); - } - return Device->getDataLayout(); + std::string DataLayout = std::string( + "e" + "-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32" + "-v16:16:16-v24:32:32-v32:32:32-v48:64:64-v64:64:64-v96:128:128-v128:128:128" + "-v192:256:256-v256:256:256-v512:512:512-v1024:1024:1024-v2048:2048:2048" + "-n32:64" + ); + + if (hasHWFP64()) { + DataLayout.append("-f64:64:64"); + } + + if (is64bit()) { + DataLayout.append("-p:64:64:64"); + } else { + DataLayout.append("-p:32:32:32"); + } + + return DataLayout; } std::string AMDGPUSubtarget::getDeviceName() const { return DevName; } -const AMDGPUDevice * -AMDGPUSubtarget::device() const { - return Device; -} diff --git a/lib/Target/R600/AMDGPUSubtarget.h b/lib/Target/R600/AMDGPUSubtarget.h index b6501a4..8c65096 100644 --- a/lib/Target/R600/AMDGPUSubtarget.h +++ b/lib/Target/R600/AMDGPUSubtarget.h @@ -14,7 +14,7 @@ #ifndef AMDGPUSUBTARGET_H #define AMDGPUSUBTARGET_H -#include "AMDILDevice.h" +#include "AMDGPU.h" #include "llvm/ADT/StringExtras.h" #include "llvm/ADT/StringRef.h" #include "llvm/Target/TargetSubtargetInfo.h" @@ -27,9 +27,16 @@ namespace llvm { class AMDGPUSubtarget : public AMDGPUGenSubtargetInfo { +public: + enum Generation { + R600 = 0, + R700, + EVERGREEN, + NORTHERN_ISLANDS, + SOUTHERN_ISLANDS + }; + private: - bool CapsOverride[AMDGPUDeviceInfo::MaxNumberCapabilities]; - const AMDGPUDevice *Device; size_t DefaultSize[3]; std::string DevName; bool Is64bit; @@ -37,23 +44,28 @@ private: bool DumpCode; bool R600ALUInst; bool HasVertexCache; + short TexVTXClauseSize; + enum Generation Gen; + bool FP64; + bool CaymanISA; InstrItineraryData InstrItins; public: AMDGPUSubtarget(StringRef TT, StringRef CPU, StringRef FS); - virtual ~AMDGPUSubtarget(); const InstrItineraryData &getInstrItineraryData() const { return InstrItins; } virtual void ParseSubtargetFeatures(StringRef CPU, StringRef FS); - bool isOverride(AMDGPUDeviceInfo::Caps) const; bool is64bit() const; bool hasVertexCache() const; + short getTexVTXClauseSize() const; + enum Generation getGeneration() const; + bool hasHWFP64() const; + bool hasCaymanISA() const; // Helper functions to simplify if statements bool isTargetELF() const; - const AMDGPUDevice* device() const; std::string getDataLayout() const; std::string getDeviceName() const; virtual size_t getDefaultSize(uint32_t dim) const; diff --git a/lib/Target/R600/AMDGPUTargetMachine.cpp b/lib/Target/R600/AMDGPUTargetMachine.cpp index 0ec67ce..2fba434 100644 --- a/lib/Target/R600/AMDGPUTargetMachine.cpp +++ b/lib/Target/R600/AMDGPUTargetMachine.cpp @@ -58,18 +58,19 @@ AMDGPUTargetMachine::AMDGPUTargetMachine(const Target &T, StringRef TT, LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OptLevel), Subtarget(TT, CPU, FS), Layout(Subtarget.getDataLayout()), - FrameLowering(TargetFrameLowering::StackGrowsUp, - Subtarget.device()->getStackAlignment(), 0), + FrameLowering(TargetFrameLowering::StackGrowsUp, 16 // Stack Alignment + , 0), IntrinsicInfo(this), InstrItins(&Subtarget.getInstrItineraryData()) { // TLInfo uses InstrInfo so it must be initialized after. 
- if (Subtarget.device()->getGeneration() <= AMDGPUDeviceInfo::HD6XXX) { - InstrInfo = new R600InstrInfo(*this); - TLInfo = new R600TargetLowering(*this); + if (Subtarget.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) { + InstrInfo.reset(new R600InstrInfo(*this)); + TLInfo.reset(new R600TargetLowering(*this)); } else { - InstrInfo = new SIInstrInfo(*this); - TLInfo = new SITargetLowering(*this); + InstrInfo.reset(new SIInstrInfo(*this)); + TLInfo.reset(new SITargetLowering(*this)); } + initAsmInfo(); } AMDGPUTargetMachine::~AMDGPUTargetMachine() { @@ -81,7 +82,7 @@ public: AMDGPUPassConfig(AMDGPUTargetMachine *TM, PassManagerBase &PM) : TargetPassConfig(TM, PM) { const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>(); - if (ST.device()->getGeneration() <= AMDGPUDeviceInfo::HD6XXX) { + if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) { enablePass(&MachineSchedulerID); MachineSchedRegistry::setDefault(createR600MachineScheduler); } @@ -107,19 +108,20 @@ TargetPassConfig *AMDGPUTargetMachine::createPassConfig(PassManagerBase &PM) { bool AMDGPUPassConfig::addPreISel() { const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>(); - if (ST.device()->getGeneration() > AMDGPUDeviceInfo::HD6XXX) { + if (ST.getGeneration() > AMDGPUSubtarget::NORTHERN_ISLANDS) { addPass(createAMDGPUStructurizeCFGPass()); addPass(createSIAnnotateControlFlowPass()); + } else { + addPass(createR600TextureIntrinsicsReplacer()); } return false; } bool AMDGPUPassConfig::addInstSelector() { - addPass(createAMDGPUPeepholeOpt(*TM)); addPass(createAMDGPUISelDag(getAMDGPUTargetMachine())); const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>(); - if (ST.device()->getGeneration() <= AMDGPUDeviceInfo::HD6XXX) { + if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) { // The callbacks this pass uses are not implemented yet on SI.
addPass(createAMDGPUIndirectAddressingPass(*TM)); } @@ -128,13 +130,18 @@ bool AMDGPUPassConfig::addInstSelector() { bool AMDGPUPassConfig::addPreRegAlloc() { addPass(createAMDGPUConvertToISAPass(*TM)); + const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>(); + + if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) { + addPass(createR600VectorRegMerger(*TM)); + } return false; } bool AMDGPUPassConfig::addPostRegAlloc() { const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>(); - if (ST.device()->getGeneration() > AMDGPUDeviceInfo::HD6XXX) { + if (ST.getGeneration() > AMDGPUSubtarget::NORTHERN_ISLANDS) { addPass(createSIInsertWaits(*TM)); } return false; @@ -148,7 +155,7 @@ bool AMDGPUPassConfig::addPreSched2() { bool AMDGPUPassConfig::addPreEmitPass() { const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>(); - if (ST.device()->getGeneration() <= AMDGPUDeviceInfo::HD6XXX) { + if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) { addPass(createAMDGPUCFGPreparationPass(*TM)); addPass(createAMDGPUCFGStructurizerPass(*TM)); addPass(createR600EmitClauseMarkers(*TM)); diff --git a/lib/Target/R600/AMDGPUTargetMachine.h b/lib/Target/R600/AMDGPUTargetMachine.h index 2afe787..bb26ed9 100644 --- a/lib/Target/R600/AMDGPUTargetMachine.h +++ b/lib/Target/R600/AMDGPUTargetMachine.h @@ -25,7 +25,7 @@ namespace llvm { -MCAsmInfo* createMCAsmInfo(const Target &T, StringRef TT); +MCAsmInfo *createMCAsmInfo(const Target &T, StringRef TT); class AMDGPUTargetMachine : public LLVMTargetMachine { @@ -33,36 +33,36 @@ class AMDGPUTargetMachine : public LLVMTargetMachine { const DataLayout Layout; AMDGPUFrameLowering FrameLowering; AMDGPUIntrinsicInfo IntrinsicInfo; - const AMDGPUInstrInfo * InstrInfo; - AMDGPUTargetLowering * TLInfo; - const InstrItineraryData* InstrItins; + OwningPtr<AMDGPUInstrInfo> InstrInfo; + OwningPtr<AMDGPUTargetLowering> TLInfo; + const InstrItineraryData *InstrItins; public: - AMDGPUTargetMachine(const Target &T, StringRef TT, StringRef FS, - StringRef CPU, - TargetOptions Options, - Reloc::Model RM, CodeModel::Model CM, - CodeGenOpt::Level OL); - ~AMDGPUTargetMachine(); - virtual const AMDGPUFrameLowering* getFrameLowering() const { - return &FrameLowering; - } - virtual const AMDGPUIntrinsicInfo* getIntrinsicInfo() const { - return &IntrinsicInfo; - } - virtual const AMDGPUInstrInfo *getInstrInfo() const {return InstrInfo;} - virtual const AMDGPUSubtarget *getSubtargetImpl() const {return &Subtarget; } - virtual const AMDGPURegisterInfo *getRegisterInfo() const { - return &InstrInfo->getRegisterInfo(); - } - virtual AMDGPUTargetLowering * getTargetLowering() const { - return TLInfo; - } - virtual const InstrItineraryData* getInstrItineraryData() const { - return InstrItins; - } - virtual const DataLayout* getDataLayout() const { return &Layout; } - virtual TargetPassConfig *createPassConfig(PassManagerBase &PM); + AMDGPUTargetMachine(const Target &T, StringRef TT, StringRef FS, + StringRef CPU, TargetOptions Options, Reloc::Model RM, + CodeModel::Model CM, CodeGenOpt::Level OL); + ~AMDGPUTargetMachine(); + virtual const AMDGPUFrameLowering *getFrameLowering() const { + return &FrameLowering; + } + virtual const AMDGPUIntrinsicInfo *getIntrinsicInfo() const { + return &IntrinsicInfo; + } + virtual const AMDGPUInstrInfo *getInstrInfo() const { + return InstrInfo.get(); + } + virtual const AMDGPUSubtarget *getSubtargetImpl() const { return &Subtarget; } + virtual const AMDGPURegisterInfo *getRegisterInfo() const { + return 
&InstrInfo->getRegisterInfo(); + } + virtual AMDGPUTargetLowering *getTargetLowering() const { + return TLInfo.get(); + } + virtual const InstrItineraryData *getInstrItineraryData() const { + return InstrItins; + } + virtual const DataLayout *getDataLayout() const { return &Layout; } + virtual TargetPassConfig *createPassConfig(PassManagerBase &PM); }; } // End namespace llvm diff --git a/lib/Target/R600/AMDIL.h b/lib/Target/R600/AMDIL.h deleted file mode 100644 index 39ab664..0000000 --- a/lib/Target/R600/AMDIL.h +++ /dev/null @@ -1,121 +0,0 @@ -//===-- AMDIL.h - Top-level interface for AMDIL representation --*- C++ -*-===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//==-----------------------------------------------------------------------===// -// -/// This file contains the entry points for global functions defined in the LLVM -/// AMDGPU back-end. -// -//===----------------------------------------------------------------------===// - -#ifndef AMDIL_H -#define AMDIL_H - -#include "llvm/CodeGen/MachineFunction.h" -#include "llvm/Target/TargetMachine.h" - -#define ARENA_SEGMENT_RESERVED_UAVS 12 -#define DEFAULT_ARENA_UAV_ID 8 -#define DEFAULT_RAW_UAV_ID 7 -#define GLOBAL_RETURN_RAW_UAV_ID 11 -#define HW_MAX_NUM_CB 8 -#define MAX_NUM_UNIQUE_UAVS 8 -#define OPENCL_MAX_NUM_ATOMIC_COUNTERS 8 -#define OPENCL_MAX_READ_IMAGES 128 -#define OPENCL_MAX_WRITE_IMAGES 8 -#define OPENCL_MAX_SAMPLERS 16 - -// The next two values can never be zero, as zero is the ID that is -// used to assert against. -#define DEFAULT_LDS_ID 1 -#define DEFAULT_GDS_ID 1 -#define DEFAULT_SCRATCH_ID 1 -#define DEFAULT_VEC_SLOTS 8 - -#define OCL_DEVICE_RV710 0x0001 -#define OCL_DEVICE_RV730 0x0002 -#define OCL_DEVICE_RV770 0x0004 -#define OCL_DEVICE_CEDAR 0x0008 -#define OCL_DEVICE_REDWOOD 0x0010 -#define OCL_DEVICE_JUNIPER 0x0020 -#define OCL_DEVICE_CYPRESS 0x0040 -#define OCL_DEVICE_CAICOS 0x0080 -#define OCL_DEVICE_TURKS 0x0100 -#define OCL_DEVICE_BARTS 0x0200 -#define OCL_DEVICE_CAYMAN 0x0400 -#define OCL_DEVICE_ALL 0x3FFF - -/// The number of function ID's that are reserved for -/// internal compiler usage. -const unsigned int RESERVED_FUNCS = 1024; - -namespace llvm { -class AMDGPUInstrPrinter; -class FunctionPass; -class MCAsmInfo; -class raw_ostream; -class Target; -class TargetMachine; - -// Instruction selection passes. -FunctionPass* - createAMDGPUISelDag(TargetMachine &TM); -FunctionPass* - createAMDGPUPeepholeOpt(TargetMachine &TM); - -// Pre emit passes. -FunctionPass* - createAMDGPUCFGPreparationPass(TargetMachine &TM); -FunctionPass* - createAMDGPUCFGStructurizerPass(TargetMachine &TM); - -extern Target TheAMDGPUTarget; -} // end namespace llvm; - -// Include device information enumerations -#include "AMDILDeviceInfo.h" - -namespace llvm { -/// OpenCL uses address spaces to differentiate between -/// various memory regions on the hardware. On the CPU -/// all of the address spaces point to the same memory, -/// however on the GPU, each address space points to -/// a seperate piece of memory that is unique from other -/// memory locations. -namespace AMDGPUAS { -enum AddressSpaces { - PRIVATE_ADDRESS = 0, ///< Address space for private memory. - GLOBAL_ADDRESS = 1, ///< Address space for global memory (RAT0, VTX0). - CONSTANT_ADDRESS = 2, ///< Address space for constant memory - LOCAL_ADDRESS = 3, ///< Address space for local memory. 
- REGION_ADDRESS = 4, ///< Address space for region memory. - ADDRESS_NONE = 5, ///< Address space for unknown memory. - PARAM_D_ADDRESS = 6, ///< Address space for direct addressible parameter memory (CONST0) - PARAM_I_ADDRESS = 7, ///< Address space for indirect addressible parameter memory (VTX1) - CONSTANT_BUFFER_0 = 8, - CONSTANT_BUFFER_1 = 9, - CONSTANT_BUFFER_2 = 10, - CONSTANT_BUFFER_3 = 11, - CONSTANT_BUFFER_4 = 12, - CONSTANT_BUFFER_5 = 13, - CONSTANT_BUFFER_6 = 14, - CONSTANT_BUFFER_7 = 15, - CONSTANT_BUFFER_8 = 16, - CONSTANT_BUFFER_9 = 17, - CONSTANT_BUFFER_10 = 18, - CONSTANT_BUFFER_11 = 19, - CONSTANT_BUFFER_12 = 20, - CONSTANT_BUFFER_13 = 21, - CONSTANT_BUFFER_14 = 22, - CONSTANT_BUFFER_15 = 23, - LAST_ADDRESS = 24 -}; - -} // namespace AMDGPUAS - -} // end namespace llvm -#endif // AMDIL_H diff --git a/lib/Target/R600/AMDIL7XXDevice.cpp b/lib/Target/R600/AMDIL7XXDevice.cpp deleted file mode 100644 index ea6ac34..0000000 --- a/lib/Target/R600/AMDIL7XXDevice.cpp +++ /dev/null @@ -1,115 +0,0 @@ -//===-- AMDIL7XXDevice.cpp - Device Info for 7XX GPUs ---------------------===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -// \file -//==-----------------------------------------------------------------------===// -#include "AMDIL7XXDevice.h" -#include "AMDGPUSubtarget.h" -#include "AMDILDevice.h" - -using namespace llvm; - -AMDGPU7XXDevice::AMDGPU7XXDevice(AMDGPUSubtarget *ST) : AMDGPUDevice(ST) { - setCaps(); - std::string name = mSTM->getDeviceName(); - if (name == "rv710") { - DeviceFlag = OCL_DEVICE_RV710; - } else if (name == "rv730") { - DeviceFlag = OCL_DEVICE_RV730; - } else { - DeviceFlag = OCL_DEVICE_RV770; - } -} - -AMDGPU7XXDevice::~AMDGPU7XXDevice() { -} - -void AMDGPU7XXDevice::setCaps() { - mSWBits.set(AMDGPUDeviceInfo::LocalMem); -} - -size_t AMDGPU7XXDevice::getMaxLDSSize() const { - if (usesHardware(AMDGPUDeviceInfo::LocalMem)) { - return MAX_LDS_SIZE_700; - } - return 0; -} - -size_t AMDGPU7XXDevice::getWavefrontSize() const { - return AMDGPUDevice::HalfWavefrontSize; -} - -uint32_t AMDGPU7XXDevice::getGeneration() const { - return AMDGPUDeviceInfo::HD4XXX; -} - -uint32_t AMDGPU7XXDevice::getResourceID(uint32_t DeviceID) const { - switch (DeviceID) { - default: - assert(0 && "ID type passed in is unknown!"); - break; - case GLOBAL_ID: - case CONSTANT_ID: - case RAW_UAV_ID: - case ARENA_UAV_ID: - break; - case LDS_ID: - if (usesHardware(AMDGPUDeviceInfo::LocalMem)) { - return DEFAULT_LDS_ID; - } - break; - case SCRATCH_ID: - if (usesHardware(AMDGPUDeviceInfo::PrivateMem)) { - return DEFAULT_SCRATCH_ID; - } - break; - case GDS_ID: - assert(0 && "GDS UAV ID is not supported on this chip"); - if (usesHardware(AMDGPUDeviceInfo::RegionMem)) { - return DEFAULT_GDS_ID; - } - break; - }; - - return 0; -} - -uint32_t AMDGPU7XXDevice::getMaxNumUAVs() const { - return 1; -} - -AMDGPU770Device::AMDGPU770Device(AMDGPUSubtarget *ST): AMDGPU7XXDevice(ST) { - setCaps(); -} - -AMDGPU770Device::~AMDGPU770Device() { -} - -void AMDGPU770Device::setCaps() { - if (mSTM->isOverride(AMDGPUDeviceInfo::DoubleOps)) { - mSWBits.set(AMDGPUDeviceInfo::FMA); - mHWBits.set(AMDGPUDeviceInfo::DoubleOps); - } - mSWBits.set(AMDGPUDeviceInfo::BarrierDetect); - mHWBits.reset(AMDGPUDeviceInfo::LongOps); - mSWBits.set(AMDGPUDeviceInfo::LongOps); - mSWBits.set(AMDGPUDeviceInfo::LocalMem); -} - -size_t AMDGPU770Device::getWavefrontSize() const { - return AMDGPUDevice::WavefrontSize; -} 
- -AMDGPU710Device::AMDGPU710Device(AMDGPUSubtarget *ST) : AMDGPU7XXDevice(ST) { -} - -AMDGPU710Device::~AMDGPU710Device() { -} - -size_t AMDGPU710Device::getWavefrontSize() const { - return AMDGPUDevice::QuarterWavefrontSize; -} diff --git a/lib/Target/R600/AMDIL7XXDevice.h b/lib/Target/R600/AMDIL7XXDevice.h deleted file mode 100644 index 1cf4ca4..0000000 --- a/lib/Target/R600/AMDIL7XXDevice.h +++ /dev/null @@ -1,72 +0,0 @@ -//==-- AMDIL7XXDevice.h - Define 7XX Device Device for AMDIL ---*- C++ -*--===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//==-----------------------------------------------------------------------===// -/// \file -/// \brief Interface for the subtarget data classes. -/// -/// This file will define the interface that each generation needs to -/// implement in order to correctly answer queries on the capabilities of the -/// specific hardware. -//===----------------------------------------------------------------------===// -#ifndef AMDIL7XXDEVICEIMPL_H -#define AMDIL7XXDEVICEIMPL_H -#include "AMDILDevice.h" - -namespace llvm { -class AMDGPUSubtarget; - -//===----------------------------------------------------------------------===// -// 7XX generation of devices and their respective sub classes -//===----------------------------------------------------------------------===// - -/// \brief The AMDGPU7XXDevice class represents the generic 7XX device. -/// -/// All 7XX devices are derived from this class. The AMDGPU7XX device will only -/// support the minimal features that are required to be considered OpenCL 1.0 -/// compliant and nothing more. -class AMDGPU7XXDevice : public AMDGPUDevice { -public: - AMDGPU7XXDevice(AMDGPUSubtarget *ST); - virtual ~AMDGPU7XXDevice(); - virtual size_t getMaxLDSSize() const; - virtual size_t getWavefrontSize() const; - virtual uint32_t getGeneration() const; - virtual uint32_t getResourceID(uint32_t DeviceID) const; - virtual uint32_t getMaxNumUAVs() const; - -protected: - virtual void setCaps(); -}; - -/// \brief The AMDGPU770Device class represents the RV770 chip and it's -/// derivative cards. -/// -/// The difference between this device and the base class is this device device -/// adds support for double precision and has a larger wavefront size. -class AMDGPU770Device : public AMDGPU7XXDevice { -public: - AMDGPU770Device(AMDGPUSubtarget *ST); - virtual ~AMDGPU770Device(); - virtual size_t getWavefrontSize() const; -private: - virtual void setCaps(); -}; - -/// \brief The AMDGPU710Device class derives from the 7XX base class. -/// -/// This class is a smaller derivative, so we need to overload some of the -/// functions in order to correctly specify this information. -class AMDGPU710Device : public AMDGPU7XXDevice { -public: - AMDGPU710Device(AMDGPUSubtarget *ST); - virtual ~AMDGPU710Device(); - virtual size_t getWavefrontSize() const; -}; - -} // namespace llvm -#endif // AMDILDEVICEIMPL_H diff --git a/lib/Target/R600/AMDILBase.td b/lib/Target/R600/AMDILBase.td index e221110..5dcd478 100644 --- a/lib/Target/R600/AMDILBase.td +++ b/lib/Target/R600/AMDILBase.td @@ -16,70 +16,6 @@ def ALU_NULL : FuncUnit; def NullALU : InstrItinClass; //===----------------------------------------------------------------------===// -// AMDIL Subtarget features. 
-//===----------------------------------------------------------------------===// -def FeatureFP64 : SubtargetFeature<"fp64", - "CapsOverride[AMDGPUDeviceInfo::DoubleOps]", - "true", - "Enable 64bit double precision operations">; -def FeatureByteAddress : SubtargetFeature<"byte_addressable_store", - "CapsOverride[AMDGPUDeviceInfo::ByteStores]", - "true", - "Enable byte addressable stores">; -def FeatureBarrierDetect : SubtargetFeature<"barrier_detect", - "CapsOverride[AMDGPUDeviceInfo::BarrierDetect]", - "true", - "Enable duplicate barrier detection(HD5XXX or later).">; -def FeatureImages : SubtargetFeature<"images", - "CapsOverride[AMDGPUDeviceInfo::Images]", - "true", - "Enable image functions">; -def FeatureMultiUAV : SubtargetFeature<"multi_uav", - "CapsOverride[AMDGPUDeviceInfo::MultiUAV]", - "true", - "Generate multiple UAV code(HD5XXX family or later)">; -def FeatureMacroDB : SubtargetFeature<"macrodb", - "CapsOverride[AMDGPUDeviceInfo::MacroDB]", - "true", - "Use internal macrodb, instead of macrodb in driver">; -def FeatureNoAlias : SubtargetFeature<"noalias", - "CapsOverride[AMDGPUDeviceInfo::NoAlias]", - "true", - "assert that all kernel argument pointers are not aliased">; -def FeatureNoInline : SubtargetFeature<"no-inline", - "CapsOverride[AMDGPUDeviceInfo::NoInline]", - "true", - "specify whether to not inline functions">; - -def Feature64BitPtr : SubtargetFeature<"64BitPtr", - "Is64bit", - "false", - "Specify if 64bit addressing should be used.">; - -def Feature32on64BitPtr : SubtargetFeature<"64on32BitPtr", - "Is32on64bit", - "false", - "Specify if 64bit sized pointers with 32bit addressing should be used.">; -def FeatureDebug : SubtargetFeature<"debug", - "CapsOverride[AMDGPUDeviceInfo::Debug]", - "true", - "Debug mode is enabled, so disable hardware accelerated address spaces.">; -def FeatureDumpCode : SubtargetFeature <"DumpCode", - "DumpCode", - "true", - "Dump MachineInstrs in the CodeEmitter">; - -def FeatureR600ALUInst : SubtargetFeature<"R600ALUInst", - "R600ALUInst", - "false", - "Older version of ALU instructions encoding.">; - -def FeatureVertexCache : SubtargetFeature<"HasVertexCache", - "HasVertexCache", - "true", - "Specify use of dedicated vertex cache.">; - -//===----------------------------------------------------------------------===// // Register File, Calling Conv, Instruction Descriptions //===----------------------------------------------------------------------===// diff --git a/lib/Target/R600/AMDILCFGStructurizer.cpp b/lib/Target/R600/AMDILCFGStructurizer.cpp index b0cd0f9..4910e5d 100644 --- a/lib/Target/R600/AMDILCFGStructurizer.cpp +++ b/lib/Target/R600/AMDILCFGStructurizer.cpp @@ -11,8 +11,8 @@ #define DEBUGME 0 #define DEBUG_TYPE "structcfg" +#include "AMDGPU.h" #include "AMDGPUInstrInfo.h" -#include "AMDIL.h" #include "llvm/ADT/SCCIterator.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/Statistic.h" @@ -28,9 +28,12 @@ #include "llvm/CodeGen/MachinePostDominators.h" #include "llvm/CodeGen/MachineRegisterInfo.h" #include "llvm/Target/TargetInstrInfo.h" +#include "llvm/Target/TargetMachine.h" using namespace llvm; +#define DEFAULT_VEC_SLOTS 8 + // TODO: move-begin. //===----------------------------------------------------------------------===// @@ -57,7 +60,7 @@ STATISTIC(numClonedInstr, "CFGStructurizer cloned instructions"); // Miscellaneous utility for CFGStructurizer. 
// //===----------------------------------------------------------------------===// -namespace llvmCFGStruct { +namespace { #define SHOWNEWINSTR(i) \ if (DEBUGME) errs() << "New instr: " << *i << "\n" @@ -98,7 +101,7 @@ void ReverseVector(SmallVector<NodeT *, DEFAULT_VEC_SLOTS> &Src) { } } -} //end namespace llvmCFGStruct +} // end anonymous namespace //===----------------------------------------------------------------------===// // @@ -106,7 +109,7 @@ void ReverseVector(SmallVector<NodeT *, DEFAULT_VEC_SLOTS> &Src) { // //===----------------------------------------------------------------------===// -namespace llvmCFGStruct { +namespace { template<class PassT> struct CFGStructTraits { }; @@ -142,7 +145,7 @@ public: LandInformation() : landBlk(NULL) {} }; -} //end of namespace llvmCFGStruct +} // end anonymous namespace //===----------------------------------------------------------------------===// // @@ -150,7 +153,7 @@ public: // //===----------------------------------------------------------------------===// -namespace llvmCFGStruct { +namespace { // bixia TODO: port it to BasicBlock, not just MachineBasicBlock. template<class PassT> class CFGStructurizer { @@ -2446,7 +2449,7 @@ CFGStructurizer<PassT>::findNearestCommonPostDom return commonDom; } //findNearestCommonPostDom -} //end namespace llvm +} // end anonymous namespace //todo: move-end @@ -2458,9 +2461,7 @@ CFGStructurizer<PassT>::findNearestCommonPostDom //===----------------------------------------------------------------------===// -using namespace llvmCFGStruct; - -namespace llvm { +namespace { class AMDGPUCFGStructurizer : public MachineFunctionPass { public: typedef MachineInstr InstructionType; @@ -2474,26 +2475,26 @@ public: protected: TargetMachine &TM; - const TargetInstrInfo *TII; - const AMDGPURegisterInfo *TRI; public: AMDGPUCFGStructurizer(char &pid, TargetMachine &tm); const TargetInstrInfo *getTargetInstrInfo() const; - -private: - + const AMDGPURegisterInfo *getTargetRegisterInfo() const; }; -} //end of namespace llvm +} // end anonymous namespace AMDGPUCFGStructurizer::AMDGPUCFGStructurizer(char &pid, TargetMachine &tm) -: MachineFunctionPass(pid), TM(tm), TII(tm.getInstrInfo()), - TRI(static_cast<const AMDGPURegisterInfo *>(tm.getRegisterInfo())) { + : MachineFunctionPass(pid), TM(tm) { } const TargetInstrInfo *AMDGPUCFGStructurizer::getTargetInstrInfo() const { - return TII; + return TM.getInstrInfo(); +} + +const AMDGPURegisterInfo *AMDGPUCFGStructurizer::getTargetRegisterInfo() const { + return static_cast<const AMDGPURegisterInfo *>(TM.getRegisterInfo()); } + //===----------------------------------------------------------------------===// // // CFGPrepare @@ -2501,9 +2502,7 @@ const TargetInstrInfo *AMDGPUCFGStructurizer::getTargetInstrInfo() const { //===----------------------------------------------------------------------===// -using namespace llvmCFGStruct; - -namespace llvm { +namespace { class AMDGPUCFGPrepare : public AMDGPUCFGStructurizer { public: static char ID; @@ -2515,13 +2514,10 @@ public: virtual void getAnalysisUsage(AnalysisUsage &AU) const; bool runOnMachineFunction(MachineFunction &F); - -private: - }; char AMDGPUCFGPrepare::ID = 0; -} //end of namespace llvm +} // end anonymous namespace AMDGPUCFGPrepare::AMDGPUCFGPrepare(TargetMachine &tm) : AMDGPUCFGStructurizer(ID, tm ) { @@ -2545,9 +2541,7 @@ void AMDGPUCFGPrepare::getAnalysisUsage(AnalysisUsage &AU) const { //===----------------------------------------------------------------------===// -using namespace llvmCFGStruct; - -namespace 
llvm { +namespace { class AMDGPUCFGPerform : public AMDGPUCFGStructurizer { public: static char ID; @@ -2557,13 +2551,10 @@ public: virtual const char *getPassName() const; virtual void getAnalysisUsage(AnalysisUsage &AU) const; bool runOnMachineFunction(MachineFunction &F); - -private: - }; char AMDGPUCFGPerform::ID = 0; -} //end of namespace llvm +} // end anonymous namespace AMDGPUCFGPerform::AMDGPUCFGPerform(TargetMachine &tm) : AMDGPUCFGStructurizer(ID, tm) { @@ -2587,7 +2578,7 @@ void AMDGPUCFGPerform::getAnalysisUsage(AnalysisUsage &AU) const { // //===----------------------------------------------------------------------===// -namespace llvmCFGStruct { +namespace { // this class is tailor to the AMDGPU backend template<> struct CFGStructTraits<AMDGPUCFGStructurizer> { @@ -3024,28 +3015,24 @@ struct CFGStructTraits<AMDGPUCFGStructurizer> { return &pass.getAnalysis<MachineLoopInfo>(); } }; // template class CFGStructTraits -} //end of namespace llvm +} // end anonymous namespace // createAMDGPUCFGPreparationPass- Returns a pass -FunctionPass *llvm::createAMDGPUCFGPreparationPass(TargetMachine &tm - ) { - return new AMDGPUCFGPrepare(tm ); +FunctionPass *llvm::createAMDGPUCFGPreparationPass(TargetMachine &tm) { + return new AMDGPUCFGPrepare(tm); } bool AMDGPUCFGPrepare::runOnMachineFunction(MachineFunction &func) { - return llvmCFGStruct::CFGStructurizer<AMDGPUCFGStructurizer>().prepare(func, - *this, - TRI); + return CFGStructurizer<AMDGPUCFGStructurizer>().prepare(func, *this, + getTargetRegisterInfo()); } // createAMDGPUCFGStructurizerPass- Returns a pass -FunctionPass *llvm::createAMDGPUCFGStructurizerPass(TargetMachine &tm - ) { - return new AMDGPUCFGPerform(tm ); +FunctionPass *llvm::createAMDGPUCFGStructurizerPass(TargetMachine &tm) { + return new AMDGPUCFGPerform(tm); } bool AMDGPUCFGPerform::runOnMachineFunction(MachineFunction &func) { - return llvmCFGStruct::CFGStructurizer<AMDGPUCFGStructurizer>().run(func, - *this, - TRI); + return CFGStructurizer<AMDGPUCFGStructurizer>().run(func, *this, + getTargetRegisterInfo()); } diff --git a/lib/Target/R600/AMDILDevice.cpp b/lib/Target/R600/AMDILDevice.cpp deleted file mode 100644 index db8e01e..0000000 --- a/lib/Target/R600/AMDILDevice.cpp +++ /dev/null @@ -1,132 +0,0 @@ -//===-- AMDILDevice.cpp - Base class for AMDIL Devices --------------------===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -/// \file -//==-----------------------------------------------------------------------===// -#include "AMDILDevice.h" -#include "AMDGPUSubtarget.h" - -using namespace llvm; -// Default implementation for all of the classes. 
-AMDGPUDevice::AMDGPUDevice(AMDGPUSubtarget *ST) : mSTM(ST) { - mHWBits.resize(AMDGPUDeviceInfo::MaxNumberCapabilities); - mSWBits.resize(AMDGPUDeviceInfo::MaxNumberCapabilities); - setCaps(); - DeviceFlag = OCL_DEVICE_ALL; -} - -AMDGPUDevice::~AMDGPUDevice() { - mHWBits.clear(); - mSWBits.clear(); -} - -size_t AMDGPUDevice::getMaxGDSSize() const { - return 0; -} - -uint32_t -AMDGPUDevice::getDeviceFlag() const { - return DeviceFlag; -} - -size_t AMDGPUDevice::getMaxNumCBs() const { - if (usesHardware(AMDGPUDeviceInfo::ConstantMem)) { - return HW_MAX_NUM_CB; - } - - return 0; -} - -size_t AMDGPUDevice::getMaxCBSize() const { - if (usesHardware(AMDGPUDeviceInfo::ConstantMem)) { - return MAX_CB_SIZE; - } - - return 0; -} - -size_t AMDGPUDevice::getMaxScratchSize() const { - return 65536; -} - -uint32_t AMDGPUDevice::getStackAlignment() const { - return 16; -} - -void AMDGPUDevice::setCaps() { - mSWBits.set(AMDGPUDeviceInfo::HalfOps); - mSWBits.set(AMDGPUDeviceInfo::ByteOps); - mSWBits.set(AMDGPUDeviceInfo::ShortOps); - mSWBits.set(AMDGPUDeviceInfo::HW64BitDivMod); - if (mSTM->isOverride(AMDGPUDeviceInfo::NoInline)) { - mSWBits.set(AMDGPUDeviceInfo::NoInline); - } - if (mSTM->isOverride(AMDGPUDeviceInfo::MacroDB)) { - mSWBits.set(AMDGPUDeviceInfo::MacroDB); - } - if (mSTM->isOverride(AMDGPUDeviceInfo::Debug)) { - mSWBits.set(AMDGPUDeviceInfo::ConstantMem); - } else { - mHWBits.set(AMDGPUDeviceInfo::ConstantMem); - } - if (mSTM->isOverride(AMDGPUDeviceInfo::Debug)) { - mSWBits.set(AMDGPUDeviceInfo::PrivateMem); - } else { - mHWBits.set(AMDGPUDeviceInfo::PrivateMem); - } - if (mSTM->isOverride(AMDGPUDeviceInfo::BarrierDetect)) { - mSWBits.set(AMDGPUDeviceInfo::BarrierDetect); - } - mSWBits.set(AMDGPUDeviceInfo::ByteLDSOps); - mSWBits.set(AMDGPUDeviceInfo::LongOps); -} - -AMDGPUDeviceInfo::ExecutionMode -AMDGPUDevice::getExecutionMode(AMDGPUDeviceInfo::Caps Caps) const { - if (mHWBits[Caps]) { - assert(!mSWBits[Caps] && "Cannot set both SW and HW caps"); - return AMDGPUDeviceInfo::Hardware; - } - - if (mSWBits[Caps]) { - assert(!mHWBits[Caps] && "Cannot set both SW and HW caps"); - return AMDGPUDeviceInfo::Software; - } - - return AMDGPUDeviceInfo::Unsupported; - -} - -bool AMDGPUDevice::isSupported(AMDGPUDeviceInfo::Caps Mode) const { - return getExecutionMode(Mode) != AMDGPUDeviceInfo::Unsupported; -} - -bool AMDGPUDevice::usesHardware(AMDGPUDeviceInfo::Caps Mode) const { - return getExecutionMode(Mode) == AMDGPUDeviceInfo::Hardware; -} - -bool AMDGPUDevice::usesSoftware(AMDGPUDeviceInfo::Caps Mode) const { - return getExecutionMode(Mode) == AMDGPUDeviceInfo::Software; -} - -std::string -AMDGPUDevice::getDataLayout() const { - std::string DataLayout = std::string( - "e" - "-p:32:32:32" - "-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32" - "-v16:16:16-v24:32:32-v32:32:32-v48:64:64-v64:64:64-v96:128:128-v128:128:128" - "-v192:256:256-v256:256:256-v512:512:512-v1024:1024:1024-v2048:2048:2048" - "-n32:64" - ); - - if (usesHardware(AMDGPUDeviceInfo::DoubleOps)) { - DataLayout.append("-f64:64:64"); - } - - return DataLayout; -} diff --git a/lib/Target/R600/AMDILDevice.h b/lib/Target/R600/AMDILDevice.h deleted file mode 100644 index 97df98c..0000000 --- a/lib/Target/R600/AMDILDevice.h +++ /dev/null @@ -1,117 +0,0 @@ -//===---- AMDILDevice.h - Define Device Data for AMDGPU -----*- C++ -*------===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. 
-// -//==-----------------------------------------------------------------------===// -// -/// \file -/// \brief Interface for the subtarget data classes. -// -/// This file will define the interface that each generation needs to -/// implement in order to correctly answer queries on the capabilities of the -/// specific hardware. -//===----------------------------------------------------------------------===// -#ifndef AMDILDEVICEIMPL_H -#define AMDILDEVICEIMPL_H -#include "AMDIL.h" -#include "llvm/ADT/BitVector.h" - -namespace llvm { - class AMDGPUSubtarget; - class MCStreamer; -//===----------------------------------------------------------------------===// -// Interface for data that is specific to a single device -//===----------------------------------------------------------------------===// -class AMDGPUDevice { -public: - AMDGPUDevice(AMDGPUSubtarget *ST); - virtual ~AMDGPUDevice(); - - // Enum values for the various memory types. - enum { - RAW_UAV_ID = 0, - ARENA_UAV_ID = 1, - LDS_ID = 2, - GDS_ID = 3, - SCRATCH_ID = 4, - CONSTANT_ID = 5, - GLOBAL_ID = 6, - MAX_IDS = 7 - } IO_TYPE_IDS; - - /// \returns The max LDS size that the hardware supports. Size is in - /// bytes. - virtual size_t getMaxLDSSize() const = 0; - - /// \returns The max GDS size that the hardware supports if the GDS is - /// supported by the hardware. Size is in bytes. - virtual size_t getMaxGDSSize() const; - - /// \returns The max number of hardware constant address spaces that - /// are supported by this device. - virtual size_t getMaxNumCBs() const; - - /// \returns The max number of bytes a single hardware constant buffer - /// can support. Size is in bytes. - virtual size_t getMaxCBSize() const; - - /// \returns The max number of bytes allowed by the hardware scratch - /// buffer. Size is in bytes. - virtual size_t getMaxScratchSize() const; - - /// \brief Get the flag that corresponds to the device. - virtual uint32_t getDeviceFlag() const; - - /// \returns The number of work-items that exist in a single hardware - /// wavefront. - virtual size_t getWavefrontSize() const = 0; - - /// \brief Get the generational name of this specific device. - virtual uint32_t getGeneration() const = 0; - - /// \brief Get the stack alignment of this specific device. - virtual uint32_t getStackAlignment() const; - - /// \brief Get the resource ID for this specific device. - virtual uint32_t getResourceID(uint32_t DeviceID) const = 0; - - /// \brief Get the max number of UAV's for this device. - virtual uint32_t getMaxNumUAVs() const = 0; - - - // API utilizing more detailed capabilities of each family of - // cards. If a capability is supported, then either usesHardware or - // usesSoftware returned true. If usesHardware returned true, then - // usesSoftware must return false for the same capability. Hardware - // execution means that the feature is done natively by the hardware - // and is not emulated by the softare. Software execution means - // that the feature could be done in the hardware, but there is - // software that emulates it with possibly using the hardware for - // support since the hardware does not fully comply with OpenCL - // specs. 
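The contract spelled out in that comment — for any capability, at most one of the hardware and software bits may be set — is easy to restate as a standalone sketch (plain C++ with illustrative names, not the deleted class itself):

    #include <bitset>
    #include <cassert>

    enum ExecMode { Unsupported, Software, Hardware };

    struct CapTable {
      std::bitset<32> HWBits, SWBits; // one slot per capability

      ExecMode getExecutionMode(unsigned Cap) const {
        assert(!(HWBits[Cap] && SWBits[Cap]) &&
               "Cannot set both SW and HW caps");
        if (HWBits[Cap]) return Hardware;
        if (SWBits[Cap]) return Software;
        return Unsupported;
      }
      bool isSupported(unsigned Cap) const {
        return getExecutionMode(Cap) != Unsupported;
      }
      bool usesHardware(unsigned Cap) const {
        return getExecutionMode(Cap) == Hardware;
      }
    };

A capability defaults to Unsupported; setCaps() flips it into the software set when an override requests emulation and into the hardware set otherwise, which is the dance the constructor above triggers.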
- - bool isSupported(AMDGPUDeviceInfo::Caps Mode) const; - bool usesHardware(AMDGPUDeviceInfo::Caps Mode) const; - bool usesSoftware(AMDGPUDeviceInfo::Caps Mode) const; - virtual std::string getDataLayout() const; - static const unsigned int MAX_LDS_SIZE_700 = 16384; - static const unsigned int MAX_LDS_SIZE_800 = 32768; - static const unsigned int WavefrontSize = 64; - static const unsigned int HalfWavefrontSize = 32; - static const unsigned int QuarterWavefrontSize = 16; -protected: - virtual void setCaps(); - BitVector mHWBits; - llvm::BitVector mSWBits; - AMDGPUSubtarget *mSTM; - uint32_t DeviceFlag; -private: - AMDGPUDeviceInfo::ExecutionMode - getExecutionMode(AMDGPUDeviceInfo::Caps Caps) const; -}; - -} // namespace llvm -#endif // AMDILDEVICEIMPL_H diff --git a/lib/Target/R600/AMDILDeviceInfo.cpp b/lib/Target/R600/AMDILDeviceInfo.cpp deleted file mode 100644 index 1787959..0000000 --- a/lib/Target/R600/AMDILDeviceInfo.cpp +++ /dev/null @@ -1,96 +0,0 @@ -//===-- AMDILDeviceInfo.cpp - AMDILDeviceInfo class -----------------------===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//==-----------------------------------------------------------------------===// -// -/// \file -/// \brief Function that creates DeviceInfo from a device name and other information. -// -//==-----------------------------------------------------------------------===// -#include "AMDILDevices.h" -#include "AMDGPUSubtarget.h" - -using namespace llvm; -namespace llvm { -namespace AMDGPUDeviceInfo { - -AMDGPUDevice* getDeviceFromName(const std::string &deviceName, - AMDGPUSubtarget *ptr, - bool is64bit, bool is64on32bit) { - if (deviceName.c_str()[2] == '7') { - switch (deviceName.c_str()[3]) { - case '1': - return new AMDGPU710Device(ptr); - case '7': - return new AMDGPU770Device(ptr); - default: - return new AMDGPU7XXDevice(ptr); - } - } else if (deviceName == "cypress") { -#if DEBUG - assert(!is64bit && "This device does not support 64bit pointers!"); - assert(!is64on32bit && "This device does not support 64bit" - " on 32bit pointers!"); -#endif - return new AMDGPUCypressDevice(ptr); - } else if (deviceName == "juniper") { -#if DEBUG - assert(!is64bit && "This device does not support 64bit pointers!"); - assert(!is64on32bit && "This device does not support 64bit" - " on 32bit pointers!"); -#endif - return new AMDGPUEvergreenDevice(ptr); - } else if (deviceName == "redwood" || deviceName == "sumo") { -#if DEBUG - assert(!is64bit && "This device does not support 64bit pointers!"); - assert(!is64on32bit && "This device does not support 64bit" - " on 32bit pointers!"); -#endif - return new AMDGPURedwoodDevice(ptr); - } else if (deviceName == "cedar") { -#if DEBUG - assert(!is64bit && "This device does not support 64bit pointers!"); - assert(!is64on32bit && "This device does not support 64bit" - " on 32bit pointers!"); -#endif - return new AMDGPUCedarDevice(ptr); - } else if (deviceName == "barts" || deviceName == "turks") { -#if DEBUG - assert(!is64bit && "This device does not support 64bit pointers!"); - assert(!is64on32bit && "This device does not support 64bit" - " on 32bit pointers!"); -#endif - return new AMDGPUNIDevice(ptr); - } else if (deviceName == "cayman") { -#if DEBUG - assert(!is64bit && "This device does not support 64bit pointers!"); - assert(!is64on32bit && "This device does not support 64bit" - " on 32bit pointers!"); -#endif - return new AMDGPUCaymanDevice(ptr); - } else if 
(deviceName == "caicos") { -#if DEBUG - assert(!is64bit && "This device does not support 64bit pointers!"); - assert(!is64on32bit && "This device does not support 64bit" - " on 32bit pointers!"); -#endif - return new AMDGPUNIDevice(ptr); - } else if (deviceName == "SI" || - deviceName == "tahiti" || deviceName == "pitcairn" || - deviceName == "verde" || deviceName == "oland") { - return new AMDGPUSIDevice(ptr); - } else { -#if DEBUG - assert(!is64bit && "This device does not support 64bit pointers!"); - assert(!is64on32bit && "This device does not support 64bit" - " on 32bit pointers!"); -#endif - return new AMDGPU7XXDevice(ptr); - } -} -} // End namespace AMDGPUDeviceInfo -} // End namespace llvm diff --git a/lib/Target/R600/AMDILDeviceInfo.h b/lib/Target/R600/AMDILDeviceInfo.h deleted file mode 100644 index 4b2c3a5..0000000 --- a/lib/Target/R600/AMDILDeviceInfo.h +++ /dev/null @@ -1,88 +0,0 @@ -//===-- AMDILDeviceInfo.h - Constants for describing devices --------------===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -/// \file -//==-----------------------------------------------------------------------===// -#ifndef AMDILDEVICEINFO_H -#define AMDILDEVICEINFO_H - - -#include <string> - -namespace llvm { - class AMDGPUDevice; - class AMDGPUSubtarget; - namespace AMDGPUDeviceInfo { - /// Each Capabilities can be executed using a hardware instruction, - /// emulated with a sequence of software instructions, or not - /// supported at all. - enum ExecutionMode { - Unsupported = 0, ///< Unsupported feature on the card(Default value) - /// This is the execution mode that is set if the feature is emulated in - /// software. - Software, - /// This execution mode is set if the feature exists natively in hardware - Hardware - }; - - enum Caps { - HalfOps = 0x1, ///< Half float is supported or not. - DoubleOps = 0x2, ///< Double is supported or not. - ByteOps = 0x3, ///< Byte(char) is support or not. - ShortOps = 0x4, ///< Short is supported or not. - LongOps = 0x5, ///< Long is supported or not. - Images = 0x6, ///< Images are supported or not. - ByteStores = 0x7, ///< ByteStores available(!HD4XXX). - ConstantMem = 0x8, ///< Constant/CB memory. - LocalMem = 0x9, ///< Local/LDS memory. - PrivateMem = 0xA, ///< Scratch/Private/Stack memory. - RegionMem = 0xB, ///< OCL GDS Memory Extension. - FMA = 0xC, ///< Use HW FMA or SW FMA. - ArenaSegment = 0xD, ///< Use for Arena UAV per pointer 12-1023. - MultiUAV = 0xE, ///< Use for UAV per Pointer 0-7. - Reserved0 = 0xF, ///< ReservedFlag - NoAlias = 0x10, ///< Cached loads. - Signed24BitOps = 0x11, ///< Peephole Optimization. - /// Debug mode implies that no hardware features or optimizations - /// are performned and that all memory access go through a single - /// uav(Arena on HD5XXX/HD6XXX and Raw on HD4XXX). - Debug = 0x12, - CachedMem = 0x13, ///< Cached mem is available or not. - BarrierDetect = 0x14, ///< Detect duplicate barriers. - Reserved1 = 0x15, ///< Reserved flag - ByteLDSOps = 0x16, ///< Flag to specify if byte LDS ops are available. - ArenaVectors = 0x17, ///< Flag to specify if vector loads from arena work. - TmrReg = 0x18, ///< Flag to specify if Tmr register is supported. - NoInline = 0x19, ///< Flag to specify that no inlining should occur. - MacroDB = 0x1A, ///< Flag to specify that backend handles macrodb. - HW64BitDivMod = 0x1B, ///< Flag for backend to generate 64bit div/mod. 
- ArenaUAV = 0x1C, ///< Flag to specify that arena uav is supported. - PrivateUAV = 0x1D, ///< Flag to specify that private memory uses uav's. - /// If more capabilities are required, then - /// this number needs to be increased. - /// All capabilities must come before this - /// number. - MaxNumberCapabilities = 0x20 - }; - /// These have to be in order with the older generations - /// having the lower number enumerations. - enum Generation { - HD4XXX = 0, ///< 7XX based devices. - HD5XXX, ///< Evergreen based devices. - HD6XXX, ///< NI/Evergreen+ based devices. - HD7XXX, ///< Southern Islands based devices. - HDTEST, ///< Experimental feature testing device. - HDNUMGEN - }; - - - AMDGPUDevice* - getDeviceFromName(const std::string &name, AMDGPUSubtarget *ptr, - bool is64bit = false, bool is64on32bit = false); - } // namespace AMDILDeviceInfo -} // namespace llvm -#endif // AMDILDEVICEINFO_H diff --git a/lib/Target/R600/AMDILDevices.h b/lib/Target/R600/AMDILDevices.h deleted file mode 100644 index 636fa6d..0000000 --- a/lib/Target/R600/AMDILDevices.h +++ /dev/null @@ -1,19 +0,0 @@ -//===-- AMDILDevices.h - Consolidate AMDIL Device headers -----------------===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -/// \file -//==-----------------------------------------------------------------------===// -#ifndef AMDIL_DEVICES_H -#define AMDIL_DEVICES_H -// Include all of the device specific header files -#include "AMDIL7XXDevice.h" -#include "AMDILDevice.h" -#include "AMDILEvergreenDevice.h" -#include "AMDILNIDevice.h" -#include "AMDILSIDevice.h" - -#endif // AMDIL_DEVICES_H diff --git a/lib/Target/R600/AMDILEvergreenDevice.cpp b/lib/Target/R600/AMDILEvergreenDevice.cpp deleted file mode 100644 index c5213a0..0000000 --- a/lib/Target/R600/AMDILEvergreenDevice.cpp +++ /dev/null @@ -1,169 +0,0 @@ -//===-- AMDILEvergreenDevice.cpp - Device Info for Evergreen --------------===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. 
-// -/// \file -//==-----------------------------------------------------------------------===// -#include "AMDILEvergreenDevice.h" - -using namespace llvm; - -AMDGPUEvergreenDevice::AMDGPUEvergreenDevice(AMDGPUSubtarget *ST) -: AMDGPUDevice(ST) { - setCaps(); - std::string name = ST->getDeviceName(); - if (name == "cedar") { - DeviceFlag = OCL_DEVICE_CEDAR; - } else if (name == "redwood") { - DeviceFlag = OCL_DEVICE_REDWOOD; - } else if (name == "cypress") { - DeviceFlag = OCL_DEVICE_CYPRESS; - } else { - DeviceFlag = OCL_DEVICE_JUNIPER; - } -} - -AMDGPUEvergreenDevice::~AMDGPUEvergreenDevice() { -} - -size_t AMDGPUEvergreenDevice::getMaxLDSSize() const { - if (usesHardware(AMDGPUDeviceInfo::LocalMem)) { - return MAX_LDS_SIZE_800; - } else { - return 0; - } -} -size_t AMDGPUEvergreenDevice::getMaxGDSSize() const { - if (usesHardware(AMDGPUDeviceInfo::RegionMem)) { - return MAX_LDS_SIZE_800; - } else { - return 0; - } -} -uint32_t AMDGPUEvergreenDevice::getMaxNumUAVs() const { - return 12; -} - -uint32_t AMDGPUEvergreenDevice::getResourceID(uint32_t id) const { - switch(id) { - default: - assert(0 && "ID type passed in is unknown!"); - break; - case CONSTANT_ID: - case RAW_UAV_ID: - return GLOBAL_RETURN_RAW_UAV_ID; - case GLOBAL_ID: - case ARENA_UAV_ID: - return DEFAULT_ARENA_UAV_ID; - case LDS_ID: - if (usesHardware(AMDGPUDeviceInfo::LocalMem)) { - return DEFAULT_LDS_ID; - } else { - return DEFAULT_ARENA_UAV_ID; - } - case GDS_ID: - if (usesHardware(AMDGPUDeviceInfo::RegionMem)) { - return DEFAULT_GDS_ID; - } else { - return DEFAULT_ARENA_UAV_ID; - } - case SCRATCH_ID: - if (usesHardware(AMDGPUDeviceInfo::PrivateMem)) { - return DEFAULT_SCRATCH_ID; - } else { - return DEFAULT_ARENA_UAV_ID; - } - }; - return 0; -} - -size_t AMDGPUEvergreenDevice::getWavefrontSize() const { - return AMDGPUDevice::WavefrontSize; -} - -uint32_t AMDGPUEvergreenDevice::getGeneration() const { - return AMDGPUDeviceInfo::HD5XXX; -} - -void AMDGPUEvergreenDevice::setCaps() { - mSWBits.set(AMDGPUDeviceInfo::ArenaSegment); - mHWBits.set(AMDGPUDeviceInfo::ArenaUAV); - mHWBits.set(AMDGPUDeviceInfo::HW64BitDivMod); - mSWBits.reset(AMDGPUDeviceInfo::HW64BitDivMod); - mSWBits.set(AMDGPUDeviceInfo::Signed24BitOps); - if (mSTM->isOverride(AMDGPUDeviceInfo::ByteStores)) { - mHWBits.set(AMDGPUDeviceInfo::ByteStores); - } - if (mSTM->isOverride(AMDGPUDeviceInfo::Debug)) { - mSWBits.set(AMDGPUDeviceInfo::LocalMem); - mSWBits.set(AMDGPUDeviceInfo::RegionMem); - } else { - mHWBits.set(AMDGPUDeviceInfo::LocalMem); - mHWBits.set(AMDGPUDeviceInfo::RegionMem); - } - mHWBits.set(AMDGPUDeviceInfo::Images); - if (mSTM->isOverride(AMDGPUDeviceInfo::NoAlias)) { - mHWBits.set(AMDGPUDeviceInfo::NoAlias); - } - mHWBits.set(AMDGPUDeviceInfo::CachedMem); - if (mSTM->isOverride(AMDGPUDeviceInfo::MultiUAV)) { - mHWBits.set(AMDGPUDeviceInfo::MultiUAV); - } - mHWBits.set(AMDGPUDeviceInfo::ByteLDSOps); - mSWBits.reset(AMDGPUDeviceInfo::ByteLDSOps); - mHWBits.set(AMDGPUDeviceInfo::ArenaVectors); - mHWBits.set(AMDGPUDeviceInfo::LongOps); - mSWBits.reset(AMDGPUDeviceInfo::LongOps); - mHWBits.set(AMDGPUDeviceInfo::TmrReg); -} - -AMDGPUCypressDevice::AMDGPUCypressDevice(AMDGPUSubtarget *ST) - : AMDGPUEvergreenDevice(ST) { - setCaps(); -} - -AMDGPUCypressDevice::~AMDGPUCypressDevice() { -} - -void AMDGPUCypressDevice::setCaps() { - if (mSTM->isOverride(AMDGPUDeviceInfo::DoubleOps)) { - mHWBits.set(AMDGPUDeviceInfo::DoubleOps); - mHWBits.set(AMDGPUDeviceInfo::FMA); - } -} - - -AMDGPUCedarDevice::AMDGPUCedarDevice(AMDGPUSubtarget *ST) - : 
AMDGPUEvergreenDevice(ST) { - setCaps(); -} - -AMDGPUCedarDevice::~AMDGPUCedarDevice() { -} - -void AMDGPUCedarDevice::setCaps() { - mSWBits.set(AMDGPUDeviceInfo::FMA); -} - -size_t AMDGPUCedarDevice::getWavefrontSize() const { - return AMDGPUDevice::QuarterWavefrontSize; -} - -AMDGPURedwoodDevice::AMDGPURedwoodDevice(AMDGPUSubtarget *ST) - : AMDGPUEvergreenDevice(ST) { - setCaps(); -} - -AMDGPURedwoodDevice::~AMDGPURedwoodDevice() { -} - -void AMDGPURedwoodDevice::setCaps() { - mSWBits.set(AMDGPUDeviceInfo::FMA); -} - -size_t AMDGPURedwoodDevice::getWavefrontSize() const { - return AMDGPUDevice::HalfWavefrontSize; -} diff --git a/lib/Target/R600/AMDILEvergreenDevice.h b/lib/Target/R600/AMDILEvergreenDevice.h deleted file mode 100644 index ea90f77..0000000 --- a/lib/Target/R600/AMDILEvergreenDevice.h +++ /dev/null @@ -1,93 +0,0 @@ -//==- AMDILEvergreenDevice.h - Define Evergreen Device for AMDIL -*- C++ -*--=// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//==-----------------------------------------------------------------------===// -// -/// \file -/// \brief Interface for the subtarget data classes. -/// -/// This file will define the interface that each generation needs to -/// implement in order to correctly answer queries on the capabilities of the -/// specific hardware. -//===----------------------------------------------------------------------===// -#ifndef AMDILEVERGREENDEVICE_H -#define AMDILEVERGREENDEVICE_H -#include "AMDGPUSubtarget.h" -#include "AMDILDevice.h" - -namespace llvm { - class AMDGPUSubtarget; -//===----------------------------------------------------------------------===// -// Evergreen generation of devices and their respective sub classes -//===----------------------------------------------------------------------===// - - -/// \brief The AMDGPUEvergreenDevice is the base device class for all of the Evergreen -/// series of cards. -/// -/// This class contains information required to differentiate -/// the Evergreen device from the generic AMDGPUDevice. This device represents -/// that capabilities of the 'Juniper' cards, also known as the HD57XX. -class AMDGPUEvergreenDevice : public AMDGPUDevice { -public: - AMDGPUEvergreenDevice(AMDGPUSubtarget *ST); - virtual ~AMDGPUEvergreenDevice(); - virtual size_t getMaxLDSSize() const; - virtual size_t getMaxGDSSize() const; - virtual size_t getWavefrontSize() const; - virtual uint32_t getGeneration() const; - virtual uint32_t getMaxNumUAVs() const; - virtual uint32_t getResourceID(uint32_t) const; -protected: - virtual void setCaps(); -}; - -/// The AMDGPUCypressDevice is similiar to the AMDGPUEvergreenDevice, except it has -/// support for double precision operations. This device is used to represent -/// both the Cypress and Hemlock cards, which are commercially known as HD58XX -/// and HD59XX cards. -class AMDGPUCypressDevice : public AMDGPUEvergreenDevice { -public: - AMDGPUCypressDevice(AMDGPUSubtarget *ST); - virtual ~AMDGPUCypressDevice(); -private: - virtual void setCaps(); -}; - - -/// \brief The AMDGPUCedarDevice is the class that represents all of the 'Cedar' based -/// devices. -/// -/// This class differs from the base AMDGPUEvergreenDevice in that the -/// device is a ~quarter of the 'Juniper'. These are commercially known as the -/// HD54XX and HD53XX series of cards. 
-class AMDGPUCedarDevice : public AMDGPUEvergreenDevice { -public: - AMDGPUCedarDevice(AMDGPUSubtarget *ST); - virtual ~AMDGPUCedarDevice(); - virtual size_t getWavefrontSize() const; -private: - virtual void setCaps(); -}; - -/// \brief The AMDGPURedwoodDevice is the class the represents all of the 'Redwood' based -/// devices. -/// -/// This class differs from the base class, in that these devices are -/// considered about half of a 'Juniper' device. These are commercially known as -/// the HD55XX and HD56XX series of cards. -class AMDGPURedwoodDevice : public AMDGPUEvergreenDevice { -public: - AMDGPURedwoodDevice(AMDGPUSubtarget *ST); - virtual ~AMDGPURedwoodDevice(); - virtual size_t getWavefrontSize() const; -private: - virtual void setCaps(); -}; - -} // namespace llvm -#endif // AMDILEVERGREENDEVICE_H diff --git a/lib/Target/R600/AMDILISelDAGToDAG.cpp b/lib/Target/R600/AMDILISelDAGToDAG.cpp index ba75a44..93432a2 100644 --- a/lib/Target/R600/AMDILISelDAGToDAG.cpp +++ b/lib/Target/R600/AMDILISelDAGToDAG.cpp @@ -14,14 +14,14 @@ #include "AMDGPUInstrInfo.h" #include "AMDGPUISelLowering.h" // For AMDGPUISD #include "AMDGPURegisterInfo.h" -#include "AMDILDevices.h" #include "R600InstrInfo.h" #include "SIISelLowering.h" #include "llvm/ADT/ValueMap.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" #include "llvm/CodeGen/PseudoSourceValue.h" +#include "llvm/CodeGen/SelectionDAG.h" #include "llvm/CodeGen/SelectionDAGISel.h" #include "llvm/Support/Compiler.h" -#include "llvm/CodeGen/SelectionDAG.h" #include <list> #include <queue> @@ -48,7 +48,10 @@ public: private: inline SDValue getSmallIPtrImm(unsigned Imm); + bool FoldOperand(SDValue &Src, SDValue &Sel, SDValue &Neg, SDValue &Abs, + const R600InstrInfo *TII, std::vector<unsigned> Cst); bool FoldOperands(unsigned, const R600InstrInfo *, std::vector<SDValue> &); + bool FoldDotOperands(unsigned, const R600InstrInfo *, std::vector<SDValue> &); // Complex pattern selectors bool SelectADDRParam(SDValue Addr, SDValue& R1, SDValue& R2); @@ -164,7 +167,7 @@ SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) { default: break; case ISD::BUILD_VECTOR: { const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>(); - if (ST.device()->getGeneration() > AMDGPUDeviceInfo::HD6XXX) { + if (ST.getGeneration() > AMDGPUSubtarget::NORTHERN_ISLANDS) { break; } // BUILD_VECTOR is usually lowered into an IMPLICIT_DEF + 4 INSERT_SUBREG @@ -194,7 +197,7 @@ SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) { case ISD::BUILD_PAIR: { SDValue RC, SubReg0, SubReg1; const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>(); - if (ST.device()->getGeneration() <= AMDGPUDeviceInfo::HD6XXX) { + if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) { break; } if (N->getValueType(0) == MVT::i128) { @@ -211,7 +214,7 @@ SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) { const SDValue Ops[] = { RC, N->getOperand(0), SubReg0, N->getOperand(1), SubReg1 }; return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, - N->getDebugLoc(), N->getValueType(0), Ops); + SDLoc(N), N->getValueType(0), Ops); } case ISD::ConstantFP: @@ -219,7 +222,7 @@ SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) { const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>(); // XXX: Custom immediate lowering not implemented yet. 
Instead we use // pseudo instructions defined in SIInstructions.td - if (ST.device()->getGeneration() > AMDGPUDeviceInfo::HD6XXX) { + if (ST.getGeneration() > AMDGPUSubtarget::NORTHERN_ISLANDS) { break; } const R600InstrInfo *TII = static_cast<const R600InstrInfo*>(TM.getInstrInfo()); @@ -314,9 +317,23 @@ SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) { // Fold operands of selected node const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>(); - if (ST.device()->getGeneration() <= AMDGPUDeviceInfo::HD6XXX) { + if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) { const R600InstrInfo *TII = static_cast<const R600InstrInfo*>(TM.getInstrInfo()); + if (Result && Result->isMachineOpcode() && Result->getMachineOpcode() == AMDGPU::DOT_4) { + bool IsModified = false; + do { + std::vector<SDValue> Ops; + for(SDNode::op_iterator I = Result->op_begin(), E = Result->op_end(); + I != E; ++I) + Ops.push_back(*I); + IsModified = FoldDotOperands(Result->getMachineOpcode(), TII, Ops); + if (IsModified) { + Result = CurDAG->UpdateNodeOperands(Result, Ops.data(), Ops.size()); + } + } while (IsModified); + + } if (Result && Result->isMachineOpcode() && !(TII->get(Result->getMachineOpcode()).TSFlags & R600_InstFlag::VECTOR) && TII->isALUInstr(Result->getMachineOpcode())) { @@ -359,6 +376,43 @@ SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) { return Result; } +bool AMDGPUDAGToDAGISel::FoldOperand(SDValue &Src, SDValue &Sel, SDValue &Neg, + SDValue &Abs, const R600InstrInfo *TII, + std::vector<unsigned> Consts) { + switch (Src.getOpcode()) { + case AMDGPUISD::CONST_ADDRESS: { + SDValue CstOffset; + if (Src.getValueType().isVector() || + !SelectGlobalValueConstantOffset(Src.getOperand(0), CstOffset)) + return false; + + ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(CstOffset); + Consts.push_back(Cst->getZExtValue()); + if (!TII->fitsConstReadLimitations(Consts)) + return false; + + Src = CurDAG->getRegister(AMDGPU::ALU_CONST, MVT::f32); + Sel = CstOffset; + return true; + } + case ISD::FNEG: + Src = Src.getOperand(0); + Neg = CurDAG->getTargetConstant(1, MVT::i32); + return true; + case ISD::FABS: + if (!Abs.getNode()) + return false; + Src = Src.getOperand(0); + Abs = CurDAG->getTargetConstant(1, MVT::i32); + return true; + case ISD::BITCAST: + Src = Src.getOperand(0); + return true; + default: + return false; + } +} + bool AMDGPUDAGToDAGISel::FoldOperands(unsigned Opcode, const R600InstrInfo *TII, std::vector<SDValue> &Ops) { int OperandIdx[] = { @@ -382,59 +436,101 @@ bool AMDGPUDAGToDAGISel::FoldOperands(unsigned Opcode, -1 }; + // Gather constants values + std::vector<unsigned> Consts; + for (unsigned j = 0; j < 3; j++) { + int SrcIdx = OperandIdx[j]; + if (SrcIdx < 0) + break; + if (RegisterSDNode *Reg = dyn_cast<RegisterSDNode>(Ops[SrcIdx - 1])) { + if (Reg->getReg() == AMDGPU::ALU_CONST) { + ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Ops[SelIdx[j] - 1]); + Consts.push_back(Cst->getZExtValue()); + } + } + } + for (unsigned i = 0; i < 3; i++) { if (OperandIdx[i] < 0) return false; - SDValue Operand = Ops[OperandIdx[i] - 1]; - switch (Operand.getOpcode()) { - case AMDGPUISD::CONST_ADDRESS: { - SDValue CstOffset; - if (Operand.getValueType().isVector() || - !SelectGlobalValueConstantOffset(Operand.getOperand(0), CstOffset)) - break; - - // Gather others constants values - std::vector<unsigned> Consts; - for (unsigned j = 0; j < 3; j++) { - int SrcIdx = OperandIdx[j]; - if (SrcIdx < 0) - break; - if (RegisterSDNode *Reg = dyn_cast<RegisterSDNode>(Ops[SrcIdx - 1])) { - if (Reg->getReg() == 
AMDGPU::ALU_CONST) { - ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Ops[SelIdx[j] - 1]); - Consts.push_back(Cst->getZExtValue()); - } - } - } + SDValue &Src = Ops[OperandIdx[i] - 1]; + SDValue &Sel = Ops[SelIdx[i] - 1]; + SDValue &Neg = Ops[NegIdx[i] - 1]; + SDValue FakeAbs; + SDValue &Abs = (AbsIdx[i] > -1) ? Ops[AbsIdx[i] - 1] : FakeAbs; + if (FoldOperand(Src, Sel, Neg, Abs, TII, Consts)) + return true; + } + return false; +} - ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(CstOffset); - Consts.push_back(Cst->getZExtValue()); - if (!TII->fitsConstReadLimitations(Consts)) - break; +bool AMDGPUDAGToDAGISel::FoldDotOperands(unsigned Opcode, + const R600InstrInfo *TII, std::vector<SDValue> &Ops) { + int OperandIdx[] = { + TII->getOperandIdx(Opcode, R600Operands::SRC0_X), + TII->getOperandIdx(Opcode, R600Operands::SRC0_Y), + TII->getOperandIdx(Opcode, R600Operands::SRC0_Z), + TII->getOperandIdx(Opcode, R600Operands::SRC0_W), + TII->getOperandIdx(Opcode, R600Operands::SRC1_X), + TII->getOperandIdx(Opcode, R600Operands::SRC1_Y), + TII->getOperandIdx(Opcode, R600Operands::SRC1_Z), + TII->getOperandIdx(Opcode, R600Operands::SRC1_W) + }; + int SelIdx[] = { + TII->getOperandIdx(Opcode, R600Operands::SRC0_SEL_X), + TII->getOperandIdx(Opcode, R600Operands::SRC0_SEL_Y), + TII->getOperandIdx(Opcode, R600Operands::SRC0_SEL_Z), + TII->getOperandIdx(Opcode, R600Operands::SRC0_SEL_W), + TII->getOperandIdx(Opcode, R600Operands::SRC1_SEL_X), + TII->getOperandIdx(Opcode, R600Operands::SRC1_SEL_Y), + TII->getOperandIdx(Opcode, R600Operands::SRC1_SEL_Z), + TII->getOperandIdx(Opcode, R600Operands::SRC1_SEL_W) + }; + int NegIdx[] = { + TII->getOperandIdx(Opcode, R600Operands::SRC0_NEG_X), + TII->getOperandIdx(Opcode, R600Operands::SRC0_NEG_Y), + TII->getOperandIdx(Opcode, R600Operands::SRC0_NEG_Z), + TII->getOperandIdx(Opcode, R600Operands::SRC0_NEG_W), + TII->getOperandIdx(Opcode, R600Operands::SRC1_NEG_X), + TII->getOperandIdx(Opcode, R600Operands::SRC1_NEG_Y), + TII->getOperandIdx(Opcode, R600Operands::SRC1_NEG_Z), + TII->getOperandIdx(Opcode, R600Operands::SRC1_NEG_W) + }; + int AbsIdx[] = { + TII->getOperandIdx(Opcode, R600Operands::SRC0_ABS_X), + TII->getOperandIdx(Opcode, R600Operands::SRC0_ABS_Y), + TII->getOperandIdx(Opcode, R600Operands::SRC0_ABS_Z), + TII->getOperandIdx(Opcode, R600Operands::SRC0_ABS_W), + TII->getOperandIdx(Opcode, R600Operands::SRC1_ABS_X), + TII->getOperandIdx(Opcode, R600Operands::SRC1_ABS_Y), + TII->getOperandIdx(Opcode, R600Operands::SRC1_ABS_Z), + TII->getOperandIdx(Opcode, R600Operands::SRC1_ABS_W) + }; - Ops[OperandIdx[i] - 1] = CurDAG->getRegister(AMDGPU::ALU_CONST, MVT::f32); - Ops[SelIdx[i] - 1] = CstOffset; - return true; - } - case ISD::FNEG: - if (NegIdx[i] < 0) - break; - Ops[OperandIdx[i] - 1] = Operand.getOperand(0); - Ops[NegIdx[i] - 1] = CurDAG->getTargetConstant(1, MVT::i32); - return true; - case ISD::FABS: - if (AbsIdx[i] < 0) - break; - Ops[OperandIdx[i] - 1] = Operand.getOperand(0); - Ops[AbsIdx[i] - 1] = CurDAG->getTargetConstant(1, MVT::i32); - return true; - case ISD::BITCAST: - Ops[OperandIdx[i] - 1] = Operand.getOperand(0); - return true; - default: + // Gather constants values + std::vector<unsigned> Consts; + for (unsigned j = 0; j < 8; j++) { + int SrcIdx = OperandIdx[j]; + if (SrcIdx < 0) break; + if (RegisterSDNode *Reg = dyn_cast<RegisterSDNode>(Ops[SrcIdx - 1])) { + if (Reg->getReg() == AMDGPU::ALU_CONST) { + ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Ops[SelIdx[j] - 1]); + Consts.push_back(Cst->getZExtValue()); + } } } + + for (unsigned i = 
0; i < 8; i++) { + if (OperandIdx[i] < 0) + return false; + SDValue &Src = Ops[OperandIdx[i] - 1]; + SDValue &Sel = Ops[SelIdx[i] - 1]; + SDValue &Neg = Ops[NegIdx[i] - 1]; + SDValue &Abs = Ops[AbsIdx[i] - 1]; + if (FoldOperand(Src, Sel, Neg, Abs, TII, Consts)) + return true; + } return false; } @@ -616,7 +712,7 @@ bool AMDGPUDAGToDAGISel::SelectADDRVTX_READ(SDValue Addr, SDValue &Base, } else if ((IMMOffset = dyn_cast<ConstantSDNode>(Addr)) && isInt<16>(IMMOffset->getZExtValue())) { Base = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), - CurDAG->getEntryNode().getDebugLoc(), + SDLoc(CurDAG->getEntryNode()), AMDGPU::ZERO, MVT::i32); Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), MVT::i32); return true; @@ -649,18 +745,45 @@ bool AMDGPUDAGToDAGISel::SelectADDRIndirect(SDValue Addr, SDValue &Base, void AMDGPUDAGToDAGISel::PostprocessISelDAG() { + if (Subtarget.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS) { + return; + } + // Go over all selected nodes and try to fold them a bit more - const AMDGPUTargetLowering& Lowering = ((const AMDGPUTargetLowering&)TLI); + const AMDGPUTargetLowering& Lowering = (*(const AMDGPUTargetLowering*)TLI); for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(), E = CurDAG->allnodes_end(); I != E; ++I) { - MachineSDNode *Node = dyn_cast<MachineSDNode>(I); - if (!Node) + SDNode *Node = I; + switch (Node->getOpcode()) { + // Fix the register class in copy to CopyToReg nodes - ISel will always + // use SReg classes for 64-bit copies, but this is not always what we want. + case ISD::CopyToReg: { + unsigned Reg = cast<RegisterSDNode>(Node->getOperand(1))->getReg(); + SDValue Val = Node->getOperand(2); + const TargetRegisterClass *RC = RegInfo->getRegClass(Reg); + if (RC != &AMDGPU::SReg_64RegClass) { + continue; + } + + if (!Val.getNode()->isMachineOpcode()) { + continue; + } + + const MCInstrDesc Desc = TM.getInstrInfo()->get(Val.getNode()->getMachineOpcode()); + const TargetRegisterInfo *TRI = TM.getRegisterInfo(); + RegInfo->setRegClass(Reg, TRI->getRegClass(Desc.OpInfo[0].RegClass)); continue; + } + } - SDNode *ResNode = Lowering.PostISelFolding(Node, *CurDAG); - if (ResNode != Node) + MachineSDNode *MachineNode = dyn_cast<MachineSDNode>(I); + if (!MachineNode) + continue; + + SDNode *ResNode = Lowering.PostISelFolding(MachineNode, *CurDAG); + if (ResNode != Node) { ReplaceUses(Node, ResNode); + } } } - diff --git a/lib/Target/R600/AMDILISelLowering.cpp b/lib/Target/R600/AMDILISelLowering.cpp index 922cac1..d669966 100644 --- a/lib/Target/R600/AMDILISelLowering.cpp +++ b/lib/Target/R600/AMDILISelLowering.cpp @@ -15,7 +15,6 @@ #include "AMDGPUISelLowering.h" #include "AMDGPURegisterInfo.h" #include "AMDGPUSubtarget.h" -#include "AMDILDevices.h" #include "AMDILIntrinsicInfo.h" #include "llvm/CodeGen/MachineFrameInfo.h" #include "llvm/CodeGen/MachineRegisterInfo.h" @@ -138,8 +137,6 @@ void AMDGPUTargetLowering::InitAMDILLowering() { setOperationAction(ISD::SMUL_LOHI, VT, Expand); setOperationAction(ISD::UMUL_LOHI, VT, Expand); - // GPU doesn't have a rotl, rotr, or byteswap instruction - setOperationAction(ISD::ROTR, VT, Expand); setOperationAction(ISD::BSWAP, VT, Expand); // GPU doesn't have any counting operators @@ -158,21 +155,19 @@ void AMDGPUTargetLowering::InitAMDILLowering() { setOperationAction(ISD::SELECT_CC, VT, Expand); } - if (STM.device()->isSupported(AMDGPUDeviceInfo::LongOps)) { - setOperationAction(ISD::MULHU, MVT::i64, Expand); - setOperationAction(ISD::MULHU, MVT::v2i64, Expand); - setOperationAction(ISD::MULHS, 
MVT::i64, Expand); - setOperationAction(ISD::MULHS, MVT::v2i64, Expand); - setOperationAction(ISD::ADD, MVT::v2i64, Expand); - setOperationAction(ISD::SREM, MVT::v2i64, Expand); - setOperationAction(ISD::Constant , MVT::i64 , Legal); - setOperationAction(ISD::SDIV, MVT::v2i64, Expand); - setOperationAction(ISD::TRUNCATE, MVT::v2i64, Expand); - setOperationAction(ISD::SIGN_EXTEND, MVT::v2i64, Expand); - setOperationAction(ISD::ZERO_EXTEND, MVT::v2i64, Expand); - setOperationAction(ISD::ANY_EXTEND, MVT::v2i64, Expand); - } - if (STM.device()->isSupported(AMDGPUDeviceInfo::DoubleOps)) { + setOperationAction(ISD::MULHU, MVT::i64, Expand); + setOperationAction(ISD::MULHU, MVT::v2i64, Expand); + setOperationAction(ISD::MULHS, MVT::i64, Expand); + setOperationAction(ISD::MULHS, MVT::v2i64, Expand); + setOperationAction(ISD::ADD, MVT::v2i64, Expand); + setOperationAction(ISD::SREM, MVT::v2i64, Expand); + setOperationAction(ISD::Constant , MVT::i64 , Legal); + setOperationAction(ISD::SDIV, MVT::v2i64, Expand); + setOperationAction(ISD::TRUNCATE, MVT::v2i64, Expand); + setOperationAction(ISD::SIGN_EXTEND, MVT::v2i64, Expand); + setOperationAction(ISD::ZERO_EXTEND, MVT::v2i64, Expand); + setOperationAction(ISD::ANY_EXTEND, MVT::v2i64, Expand); + if (STM.hasHWFP64()) { // we support loading/storing v2f64 but not operations on the type setOperationAction(ISD::FADD, MVT::v2f64, Expand); setOperationAction(ISD::FSUB, MVT::v2f64, Expand); @@ -331,7 +326,7 @@ SDValue AMDGPUTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const { SDValue Data = Op.getOperand(0); VTSDNode *BaseType = cast<VTSDNode>(Op.getOperand(1)); - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); EVT DVT = Data.getValueType(); EVT BVT = BaseType->getVT(); unsigned baseBits = BVT.getScalarType().getSizeInBits(); @@ -387,7 +382,7 @@ AMDGPUTargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const { SDValue Result; Result = DAG.getNode( AMDGPUISD::BRANCH_COND, - Op.getDebugLoc(), + SDLoc(Op), Op.getValueType(), Chain, Jump, Cond); return Result; @@ -395,7 +390,7 @@ AMDGPUTargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const { SDValue AMDGPUTargetLowering::LowerSDIV24(SDValue Op, SelectionDAG &DAG) const { - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); EVT OVT = Op.getValueType(); SDValue LHS = Op.getOperand(0); SDValue RHS = Op.getOperand(1); @@ -476,7 +471,7 @@ AMDGPUTargetLowering::LowerSDIV24(SDValue Op, SelectionDAG &DAG) const { SDValue AMDGPUTargetLowering::LowerSDIV32(SDValue Op, SelectionDAG &DAG) const { - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); EVT OVT = Op.getValueType(); SDValue LHS = Op.getOperand(0); SDValue RHS = Op.getOperand(1); @@ -547,7 +542,7 @@ AMDGPUTargetLowering::LowerSDIV64(SDValue Op, SelectionDAG &DAG) const { SDValue AMDGPUTargetLowering::LowerSREM8(SDValue Op, SelectionDAG &DAG) const { - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); EVT OVT = Op.getValueType(); MVT INTTY = MVT::i32; if (OVT == MVT::v2i8) { @@ -564,7 +559,7 @@ AMDGPUTargetLowering::LowerSREM8(SDValue Op, SelectionDAG &DAG) const { SDValue AMDGPUTargetLowering::LowerSREM16(SDValue Op, SelectionDAG &DAG) const { - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); EVT OVT = Op.getValueType(); MVT INTTY = MVT::i32; if (OVT == MVT::v2i16) { @@ -581,7 +576,7 @@ AMDGPUTargetLowering::LowerSREM16(SDValue Op, SelectionDAG &DAG) const { SDValue AMDGPUTargetLowering::LowerSREM32(SDValue Op, SelectionDAG &DAG) const { - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); EVT OVT = Op.getValueType(); SDValue LHS = 
Op.getOperand(0); SDValue RHS = Op.getOperand(1); diff --git a/lib/Target/R600/AMDILInstrInfo.td b/lib/Target/R600/AMDILInstrInfo.td index 110f147..f7d0bd5 100644 --- a/lib/Target/R600/AMDILInstrInfo.td +++ b/lib/Target/R600/AMDILInstrInfo.td @@ -10,63 +10,6 @@ // This file describes the AMDIL instructions in TableGen format. // //===----------------------------------------------------------------------===// -// AMDIL Instruction Predicate Definitions -// Predicate that is set to true if the hardware supports double precision -// divide -def HasHWDDiv : Predicate<"Subtarget.device()" - "->getGeneration() > AMDGPUDeviceInfo::HD4XXX && " - "Subtarget.device()->usesHardware(AMDGPUDeviceInfo::DoubleOps)">; - -// Predicate that is set to true if the hardware supports double, but not double -// precision divide in hardware -def HasSWDDiv : Predicate<"Subtarget.device()" - "->getGeneration() == AMDGPUDeviceInfo::HD4XXX &&" - "Subtarget.device()->usesHardware(AMDGPUDeviceInfo::DoubleOps)">; - -// Predicate that is set to true if the hardware support 24bit signed -// math ops. Otherwise a software expansion to 32bit math ops is used instead. -def HasHWSign24Bit : Predicate<"Subtarget.device()" - "->getGeneration() > AMDGPUDeviceInfo::HD5XXX">; - -// Predicate that is set to true if 64bit operations are supported or not -def HasHW64Bit : Predicate<"Subtarget.device()" - "->usesHardware(AMDGPUDeviceInfo::LongOps)">; -def HasSW64Bit : Predicate<"Subtarget.device()" - "->usesSoftware(AMDGPUDeviceInfo::LongOps)">; - -// Predicate that is set to true if the timer register is supported -def HasTmrRegister : Predicate<"Subtarget.device()" - "->isSupported(AMDGPUDeviceInfo::TmrReg)">; -// Predicate that is true if we are at least evergreen series -def HasDeviceIDInst : Predicate<"Subtarget.device()" - "->getGeneration() >= AMDGPUDeviceInfo::HD5XXX">; - -// Predicate that is true if we have region address space. -def hasRegionAS : Predicate<"Subtarget.device()" - "->usesHardware(AMDGPUDeviceInfo::RegionMem)">; - -// Predicate that is false if we don't have region address space. -def noRegionAS : Predicate<"!Subtarget.device()" - "->isSupported(AMDGPUDeviceInfo::RegionMem)">; - - -// Predicate that is set to true if 64bit Mul is supported in the IL or not -def HasHW64Mul : Predicate<"Subtarget.calVersion()" - ">= CAL_VERSION_SC_139" - "&& Subtarget.device()" - "->getGeneration() >=" - "AMDGPUDeviceInfo::HD5XXX">; -def HasSW64Mul : Predicate<"Subtarget.calVersion()" - "< CAL_VERSION_SC_139">; -// Predicate that is set to true if 64bit Div/Mod is supported in the IL or not -def HasHW64DivMod : Predicate<"Subtarget.device()" - "->usesHardware(AMDGPUDeviceInfo::HW64BitDivMod)">; -def HasSW64DivMod : Predicate<"Subtarget.device()" - "->usesSoftware(AMDGPUDeviceInfo::HW64BitDivMod)">; - -// Predicate that is set to true if 64bit pointer are used. 
-def Has64BitPtr : Predicate<"Subtarget.is64bit()">; -def Has32BitPtr : Predicate<"!Subtarget.is64bit()">; //===--------------------------------------------------------------------===// // Custom Operands //===--------------------------------------------------------------------===// diff --git a/lib/Target/R600/AMDILIntrinsicInfo.cpp b/lib/Target/R600/AMDILIntrinsicInfo.cpp index 4ddb057..762ee39 100644 --- a/lib/Target/R600/AMDILIntrinsicInfo.cpp +++ b/lib/Target/R600/AMDILIntrinsicInfo.cpp @@ -14,7 +14,6 @@ #include "AMDILIntrinsicInfo.h" #include "AMDGPUSubtarget.h" -#include "AMDIL.h" #include "llvm/IR/DerivedTypes.h" #include "llvm/IR/Intrinsics.h" #include "llvm/IR/Module.h" @@ -50,6 +49,9 @@ AMDGPUIntrinsicInfo::getName(unsigned int IntrID, Type **Tys, unsigned int AMDGPUIntrinsicInfo::lookupName(const char *Name, unsigned int Len) const { + if (!StringRef(Name, Len).startswith("llvm.")) + return 0; // All intrinsics start with 'llvm.' + #define GET_FUNCTION_RECOGNIZER #include "AMDGPUGenIntrinsics.inc" #undef GET_FUNCTION_RECOGNIZER diff --git a/lib/Target/R600/AMDILNIDevice.cpp b/lib/Target/R600/AMDILNIDevice.cpp deleted file mode 100644 index 47c3f7f..0000000 --- a/lib/Target/R600/AMDILNIDevice.cpp +++ /dev/null @@ -1,65 +0,0 @@ -//===-- AMDILNIDevice.cpp - Device Info for Northern Islands devices ------===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -/// \file -//==-----------------------------------------------------------------------===// -#include "AMDILNIDevice.h" -#include "AMDGPUSubtarget.h" -#include "AMDILEvergreenDevice.h" - -using namespace llvm; - -AMDGPUNIDevice::AMDGPUNIDevice(AMDGPUSubtarget *ST) - : AMDGPUEvergreenDevice(ST) { - std::string name = ST->getDeviceName(); - if (name == "caicos") { - DeviceFlag = OCL_DEVICE_CAICOS; - } else if (name == "turks") { - DeviceFlag = OCL_DEVICE_TURKS; - } else if (name == "cayman") { - DeviceFlag = OCL_DEVICE_CAYMAN; - } else { - DeviceFlag = OCL_DEVICE_BARTS; - } -} -AMDGPUNIDevice::~AMDGPUNIDevice() { -} - -size_t -AMDGPUNIDevice::getMaxLDSSize() const { - if (usesHardware(AMDGPUDeviceInfo::LocalMem)) { - return MAX_LDS_SIZE_900; - } else { - return 0; - } -} - -uint32_t -AMDGPUNIDevice::getGeneration() const { - return AMDGPUDeviceInfo::HD6XXX; -} - - -AMDGPUCaymanDevice::AMDGPUCaymanDevice(AMDGPUSubtarget *ST) - : AMDGPUNIDevice(ST) { - setCaps(); -} - -AMDGPUCaymanDevice::~AMDGPUCaymanDevice() { -} - -void -AMDGPUCaymanDevice::setCaps() { - if (mSTM->isOverride(AMDGPUDeviceInfo::DoubleOps)) { - mHWBits.set(AMDGPUDeviceInfo::DoubleOps); - mHWBits.set(AMDGPUDeviceInfo::FMA); - } - mHWBits.set(AMDGPUDeviceInfo::Signed24BitOps); - mSWBits.reset(AMDGPUDeviceInfo::Signed24BitOps); - mSWBits.set(AMDGPUDeviceInfo::ArenaSegment); -} - diff --git a/lib/Target/R600/AMDILNIDevice.h b/lib/Target/R600/AMDILNIDevice.h deleted file mode 100644 index 24a6408..0000000 --- a/lib/Target/R600/AMDILNIDevice.h +++ /dev/null @@ -1,57 +0,0 @@ -//===------- AMDILNIDevice.h - Define NI Device for AMDIL -*- C++ -*------===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//==-----------------------------------------------------------------------===// -/// \file -/// \brief Interface for the subtarget data classes. 
-/// -/// This file will define the interface that each generation needs to -/// implement in order to correctly answer queries on the capabilities of the -/// specific hardware. -//===---------------------------------------------------------------------===// -#ifndef AMDILNIDEVICE_H -#define AMDILNIDEVICE_H -#include "AMDGPUSubtarget.h" -#include "AMDILEvergreenDevice.h" - -namespace llvm { - -class AMDGPUSubtarget; -//===---------------------------------------------------------------------===// -// NI generation of devices and their respective sub classes -//===---------------------------------------------------------------------===// - -/// \brief The AMDGPUNIDevice is the base class for all Northern Island series of -/// cards. -/// -/// It is very similiar to the AMDGPUEvergreenDevice, with the major -/// exception being differences in wavefront size and hardware capabilities. The -/// NI devices are all 64 wide wavefronts and also add support for signed 24 bit -/// integer operations -class AMDGPUNIDevice : public AMDGPUEvergreenDevice { -public: - AMDGPUNIDevice(AMDGPUSubtarget*); - virtual ~AMDGPUNIDevice(); - virtual size_t getMaxLDSSize() const; - virtual uint32_t getGeneration() const; -}; - -/// Just as the AMDGPUCypressDevice is the double capable version of the -/// AMDGPUEvergreenDevice, the AMDGPUCaymanDevice is the double capable version -/// of the AMDGPUNIDevice. The other major difference is that the Cayman Device -/// has 4 wide ALU's, whereas the rest of the NI family is a 5 wide. -class AMDGPUCaymanDevice: public AMDGPUNIDevice { -public: - AMDGPUCaymanDevice(AMDGPUSubtarget*); - virtual ~AMDGPUCaymanDevice(); -private: - virtual void setCaps(); -}; - -static const unsigned int MAX_LDS_SIZE_900 = AMDGPUDevice::MAX_LDS_SIZE_800; -} // namespace llvm -#endif // AMDILNIDEVICE_H diff --git a/lib/Target/R600/AMDILPeepholeOptimizer.cpp b/lib/Target/R600/AMDILPeepholeOptimizer.cpp deleted file mode 100644 index 3a28038..0000000 --- a/lib/Target/R600/AMDILPeepholeOptimizer.cpp +++ /dev/null @@ -1,1215 +0,0 @@ -//===-- AMDILPeepholeOptimizer.cpp - AMDGPU Peephole optimizations ---------===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. 
-// -/// \file -//==-----------------------------------------------------------------------===// - -#define DEBUG_TYPE "PeepholeOpt" -#ifdef DEBUG -#define DEBUGME (DebugFlag && isCurrentDebugType(DEBUG_TYPE)) -#else -#define DEBUGME 0 -#endif - -#include "AMDILDevices.h" -#include "AMDGPUInstrInfo.h" -#include "llvm/ADT/Statistic.h" -#include "llvm/ADT/StringExtras.h" -#include "llvm/ADT/StringRef.h" -#include "llvm/ADT/Twine.h" -#include "llvm/IR/Constants.h" -#include "llvm/CodeGen/MachineFunction.h" -#include "llvm/CodeGen/MachineFunctionAnalysis.h" -#include "llvm/IR/Function.h" -#include "llvm/IR/Instructions.h" -#include "llvm/IR/Module.h" -#include "llvm/Support/Debug.h" -#include "llvm/Support/MathExtras.h" - -#include <sstream> - -#if 0 -STATISTIC(PointerAssignments, "Number of dynamic pointer " - "assigments discovered"); -STATISTIC(PointerSubtract, "Number of pointer subtractions discovered"); -#endif - -using namespace llvm; -// The Peephole optimization pass is used to do simple last minute optimizations -// that are required for correct code or to remove redundant functions -namespace { - -class OpaqueType; - -class LLVM_LIBRARY_VISIBILITY AMDGPUPeepholeOpt : public FunctionPass { -public: - TargetMachine &TM; - static char ID; - AMDGPUPeepholeOpt(TargetMachine &tm); - ~AMDGPUPeepholeOpt(); - const char *getPassName() const; - bool runOnFunction(Function &F); - bool doInitialization(Module &M); - bool doFinalization(Module &M); - void getAnalysisUsage(AnalysisUsage &AU) const; -protected: -private: - // Function to initiate all of the instruction level optimizations. - bool instLevelOptimizations(BasicBlock::iterator *inst); - // Quick check to see if we need to dump all of the pointers into the - // arena. If this is correct, then we set all pointers to exist in arena. This - // is a workaround for aliasing of pointers in a struct/union. - bool dumpAllIntoArena(Function &F); - // Because I don't want to invalidate any pointers while in the - // safeNestedForEachFunction. I push atomic conversions to a vector and handle - // it later. This function does the conversions if required. - void doAtomicConversionIfNeeded(Function &F); - // Because __amdil_is_constant cannot be properly evaluated if - // optimizations are disabled, the call's are placed in a vector - // and evaluated after the __amdil_image* functions are evaluated - // which should allow the __amdil_is_constant function to be - // evaluated correctly. - void doIsConstCallConversionIfNeeded(); - bool mChanged; - bool mDebug; - bool mConvertAtomics; - CodeGenOpt::Level optLevel; - // Run a series of tests to see if we can optimize a CALL instruction. - bool optimizeCallInst(BasicBlock::iterator *bbb); - // A peephole optimization to optimize bit extract sequences. - bool optimizeBitExtract(Instruction *inst); - // A peephole optimization to optimize bit insert sequences. - bool optimizeBitInsert(Instruction *inst); - bool setupBitInsert(Instruction *base, - Instruction *&src, - Constant *&mask, - Constant *&shift); - // Expand the bit field insert instruction on versions of OpenCL that - // don't support it. - bool expandBFI(CallInst *CI); - // Expand the bit field mask instruction on version of OpenCL that - // don't support it. - bool expandBFM(CallInst *CI); - // On 7XX and 8XX operations, we do not have 24 bit signed operations. So in - // this case we need to expand them. These functions check for 24bit functions - // and then expand. 
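As a rough illustration of what that 24-bit expansion amounts to (standalone C++ with hypothetical helper names — the pass itself rewrote __amdil intrinsic calls in IR rather than emitting C):

    #include <cstdint>

    // Sign-extend the low 24 bits of V to a full 32-bit value.
    static int32_t signExtend24(uint32_t V) {
      V &= 0x00FFFFFFu;              // keep the 24-bit payload
      if (V & 0x00800000u)           // bit 23 is the sign bit
        V |= 0xFF000000u;            // replicate it through the top byte
      return static_cast<int32_t>(V);
    }

    // mul24(a, b) expanded onto ordinary 32-bit multiply hardware:
    // widen to 64 bits so the product is well defined, keep the low word.
    int32_t mul24Expanded(uint32_t A, uint32_t B) {
      int64_t P = static_cast<int64_t>(signExtend24(A)) * signExtend24(B);
      return static_cast<int32_t>(P);
    }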
- bool isSigned24BitOps(CallInst *CI); - void expandSigned24BitOps(CallInst *CI); - // One optimization that can occur is that if the required workgroup size is - // specified then the result of get_local_size is known at compile time and - // can be returned accordingly. - bool isRWGLocalOpt(CallInst *CI); - // On northern island cards, the division is slightly less accurate than on - // previous generations, so we need to utilize a more accurate division. So we - // can translate the accurate divide to a normal divide on all other cards. - bool convertAccurateDivide(CallInst *CI); - void expandAccurateDivide(CallInst *CI); - // If the alignment is set incorrectly, it can produce really inefficient - // code. This checks for this scenario and fixes it if possible. - bool correctMisalignedMemOp(Instruction *inst); - - // If we are in no opt mode, then we need to make sure that - // local samplers are properly propagated as constant propagation - // doesn't occur and we need to know the value of kernel defined - // samplers at compile time. - bool propagateSamplerInst(CallInst *CI); - - // Helper functions - - // Group of functions that recursively calculate the size of a structure based - // on it's sub-types. - size_t getTypeSize(Type * const T, bool dereferencePtr = false); - size_t getTypeSize(StructType * const ST, bool dereferencePtr = false); - size_t getTypeSize(IntegerType * const IT, bool dereferencePtr = false); - size_t getTypeSize(FunctionType * const FT,bool dereferencePtr = false); - size_t getTypeSize(ArrayType * const AT, bool dereferencePtr = false); - size_t getTypeSize(VectorType * const VT, bool dereferencePtr = false); - size_t getTypeSize(PointerType * const PT, bool dereferencePtr = false); - size_t getTypeSize(OpaqueType * const OT, bool dereferencePtr = false); - - LLVMContext *mCTX; - Function *mF; - const AMDGPUSubtarget *mSTM; - SmallVector< std::pair<CallInst *, Function *>, 16> atomicFuncs; - SmallVector<CallInst *, 16> isConstVec; -}; // class AMDGPUPeepholeOpt - char AMDGPUPeepholeOpt::ID = 0; - -// A template function that has two levels of looping before calling the -// function with a pointer to the current iterator. 
-template<class InputIterator, class SecondIterator, class Function> -Function safeNestedForEach(InputIterator First, InputIterator Last, - SecondIterator S, Function F) { - for ( ; First != Last; ++First) { - SecondIterator sf, sl; - for (sf = First->begin(), sl = First->end(); - sf != sl; ) { - if (!F(&sf)) { - ++sf; - } - } - } - return F; -} - -} // anonymous namespace - -namespace llvm { - FunctionPass * - createAMDGPUPeepholeOpt(TargetMachine &tm) { - return new AMDGPUPeepholeOpt(tm); - } -} // llvm namespace - -AMDGPUPeepholeOpt::AMDGPUPeepholeOpt(TargetMachine &tm) - : FunctionPass(ID), TM(tm) { - mDebug = DEBUGME; - optLevel = TM.getOptLevel(); - -} - -AMDGPUPeepholeOpt::~AMDGPUPeepholeOpt() { -} - -const char * -AMDGPUPeepholeOpt::getPassName() const { - return "AMDGPU PeepHole Optimization Pass"; -} - -bool -containsPointerType(Type *Ty) { - if (!Ty) { - return false; - } - switch(Ty->getTypeID()) { - default: - return false; - case Type::StructTyID: { - const StructType *ST = dyn_cast<StructType>(Ty); - for (StructType::element_iterator stb = ST->element_begin(), - ste = ST->element_end(); stb != ste; ++stb) { - if (!containsPointerType(*stb)) { - continue; - } - return true; - } - break; - } - case Type::VectorTyID: - case Type::ArrayTyID: - return containsPointerType(dyn_cast<SequentialType>(Ty)->getElementType()); - case Type::PointerTyID: - return true; - }; - return false; -} - -bool -AMDGPUPeepholeOpt::dumpAllIntoArena(Function &F) { - bool dumpAll = false; - for (Function::const_arg_iterator cab = F.arg_begin(), - cae = F.arg_end(); cab != cae; ++cab) { - const Argument *arg = cab; - const PointerType *PT = dyn_cast<PointerType>(arg->getType()); - if (!PT) { - continue; - } - Type *DereferencedType = PT->getElementType(); - if (!dyn_cast<StructType>(DereferencedType) - ) { - continue; - } - if (!containsPointerType(DereferencedType)) { - continue; - } - // FIXME: Because a pointer inside of a struct/union may be aliased to - // another pointer we need to take the conservative approach and place all - // pointers into the arena until more advanced detection is implemented. - dumpAll = true; - } - return dumpAll; -} -void -AMDGPUPeepholeOpt::doIsConstCallConversionIfNeeded() { - if (isConstVec.empty()) { - return; - } - for (unsigned x = 0, y = isConstVec.size(); x < y; ++x) { - CallInst *CI = isConstVec[x]; - Constant *CV = dyn_cast<Constant>(CI->getOperand(0)); - Type *aType = Type::getInt32Ty(*mCTX); - Value *Val = (CV != NULL) ? ConstantInt::get(aType, 1) - : ConstantInt::get(aType, 0); - CI->replaceAllUsesWith(Val); - CI->eraseFromParent(); - } - isConstVec.clear(); -} -void -AMDGPUPeepholeOpt::doAtomicConversionIfNeeded(Function &F) { - // Don't do anything if we don't have any atomic operations. 
- if (atomicFuncs.empty()) { - return; - } - // Change the function name for the atomic if it is required - uint32_t size = atomicFuncs.size(); - for (uint32_t x = 0; x < size; ++x) { - atomicFuncs[x].first->setOperand( - atomicFuncs[x].first->getNumOperands()-1, - atomicFuncs[x].second); - - } - mChanged = true; - if (mConvertAtomics) { - return; - } -} - -bool -AMDGPUPeepholeOpt::runOnFunction(Function &MF) { - mChanged = false; - mF = &MF; - mSTM = &TM.getSubtarget<AMDGPUSubtarget>(); - if (mDebug) { - MF.dump(); - } - mCTX = &MF.getType()->getContext(); - mConvertAtomics = true; - safeNestedForEach(MF.begin(), MF.end(), MF.begin()->begin(), - std::bind1st(std::mem_fun(&AMDGPUPeepholeOpt::instLevelOptimizations), - this)); - - doAtomicConversionIfNeeded(MF); - doIsConstCallConversionIfNeeded(); - - if (mDebug) { - MF.dump(); - } - return mChanged; -} - -bool -AMDGPUPeepholeOpt::optimizeCallInst(BasicBlock::iterator *bbb) { - Instruction *inst = (*bbb); - CallInst *CI = dyn_cast<CallInst>(inst); - if (!CI) { - return false; - } - if (isSigned24BitOps(CI)) { - expandSigned24BitOps(CI); - ++(*bbb); - CI->eraseFromParent(); - return true; - } - if (propagateSamplerInst(CI)) { - return false; - } - if (expandBFI(CI) || expandBFM(CI)) { - ++(*bbb); - CI->eraseFromParent(); - return true; - } - if (convertAccurateDivide(CI)) { - expandAccurateDivide(CI); - ++(*bbb); - CI->eraseFromParent(); - return true; - } - - StringRef calleeName = CI->getOperand(CI->getNumOperands()-1)->getName(); - if (calleeName.startswith("__amdil_is_constant")) { - // If we do not have optimizations, then this - // cannot be properly evaluated, so we add the - // call instruction to a vector and process - // them at the end of processing after the - // samplers have been correctly handled. - if (optLevel == CodeGenOpt::None) { - isConstVec.push_back(CI); - return false; - } else { - Constant *CV = dyn_cast<Constant>(CI->getOperand(0)); - Type *aType = Type::getInt32Ty(*mCTX); - Value *Val = (CV != NULL) ? 
ConstantInt::get(aType, 1) - : ConstantInt::get(aType, 0); - CI->replaceAllUsesWith(Val); - ++(*bbb); - CI->eraseFromParent(); - return true; - } - } - - if (calleeName.equals("__amdil_is_asic_id_i32")) { - ConstantInt *CV = dyn_cast<ConstantInt>(CI->getOperand(0)); - Type *aType = Type::getInt32Ty(*mCTX); - Value *Val = CV; - if (Val) { - Val = ConstantInt::get(aType, - mSTM->device()->getDeviceFlag() & CV->getZExtValue()); - } else { - Val = ConstantInt::get(aType, 0); - } - CI->replaceAllUsesWith(Val); - ++(*bbb); - CI->eraseFromParent(); - return true; - } - Function *F = dyn_cast<Function>(CI->getOperand(CI->getNumOperands()-1)); - if (!F) { - return false; - } - if (F->getName().startswith("__atom") && !CI->getNumUses() - && F->getName().find("_xchg") == StringRef::npos) { - std::string buffer(F->getName().str() + "_noret"); - F = dyn_cast<Function>( - F->getParent()->getOrInsertFunction(buffer, F->getFunctionType())); - atomicFuncs.push_back(std::make_pair(CI, F)); - } - - if (!mSTM->device()->isSupported(AMDGPUDeviceInfo::ArenaSegment) - && !mSTM->device()->isSupported(AMDGPUDeviceInfo::MultiUAV)) { - return false; - } - if (!mConvertAtomics) { - return false; - } - StringRef name = F->getName(); - if (name.startswith("__atom") && name.find("_g") != StringRef::npos) { - mConvertAtomics = false; - } - return false; -} - -bool -AMDGPUPeepholeOpt::setupBitInsert(Instruction *base, - Instruction *&src, - Constant *&mask, - Constant *&shift) { - if (!base) { - if (mDebug) { - dbgs() << "Null pointer passed into function.\n"; - } - return false; - } - bool andOp = false; - if (base->getOpcode() == Instruction::Shl) { - shift = dyn_cast<Constant>(base->getOperand(1)); - } else if (base->getOpcode() == Instruction::And) { - mask = dyn_cast<Constant>(base->getOperand(1)); - andOp = true; - } else { - if (mDebug) { - dbgs() << "Failed setup with no Shl or And instruction on base opcode!\n"; - } - // If the base is neither a Shl or a And, we don't fit any of the patterns above. - return false; - } - src = dyn_cast<Instruction>(base->getOperand(0)); - if (!src) { - if (mDebug) { - dbgs() << "Failed setup since the base operand is not an instruction!\n"; - } - return false; - } - // If we find an 'and' operation, then we don't need to - // find the next operation as we already know the - // bits that are valid at this point. - if (andOp) { - return true; - } - if (src->getOpcode() == Instruction::Shl && !shift) { - shift = dyn_cast<Constant>(src->getOperand(1)); - src = dyn_cast<Instruction>(src->getOperand(0)); - } else if (src->getOpcode() == Instruction::And && !mask) { - mask = dyn_cast<Constant>(src->getOperand(1)); - } - if (!mask && !shift) { - if (mDebug) { - dbgs() << "Failed setup since both mask and shift are NULL!\n"; - } - // Did not find a constant mask or a shift. - return false; - } - return true; -} -bool -AMDGPUPeepholeOpt::optimizeBitInsert(Instruction *inst) { - if (!inst) { - return false; - } - if (!inst->isBinaryOp()) { - return false; - } - if (inst->getOpcode() != Instruction::Or) { - return false; - } - if (optLevel == CodeGenOpt::None) { - return false; - } - // We want to do an optimization on a sequence of ops that in the end equals a - // single ISA instruction. 
- // The base pattern for this optimization is - ((A & B) << C) | ((D & E) << F) - // Some simplified versions of this pattern are as follows: - // (A & B) | (D & E) when B & E == 0 && C == 0 && F == 0 - // ((A & B) << C) | (D & E) when B ^ E == 0 && (1 << C) >= E - // (A & B) | ((D & E) << F) when B ^ E == 0 && (1 << F) >= B - // (A & B) | (D << F) when (1 << F) >= B - // (A << C) | (D & E) when (1 << C) >= E - if (mSTM->device()->getGeneration() == AMDGPUDeviceInfo::HD4XXX) { - // The HD4XXX hardware doesn't support the ubit_insert instruction. - return false; - } - Type *aType = inst->getType(); - bool isVector = aType->isVectorTy(); - int numEle = 1; - // This optimization only works on 32bit integers. - if (aType->getScalarType() - != Type::getInt32Ty(inst->getContext())) { - return false; - } - if (isVector) { - const VectorType *VT = dyn_cast<VectorType>(aType); - numEle = VT->getNumElements(); - // We currently cannot support more than 4 elements in an intrinsic and we - // cannot support Vec3 types. - if (numEle > 4 || numEle == 3) { - return false; - } - } - // TODO: Handle vectors. - if (isVector) { - if (mDebug) { - dbgs() << "!!! Vectors are not supported yet!\n"; - } - return false; - } - Instruction *LHSSrc = NULL, *RHSSrc = NULL; - Constant *LHSMask = NULL, *RHSMask = NULL; - Constant *LHSShift = NULL, *RHSShift = NULL; - Instruction *LHS = dyn_cast<Instruction>(inst->getOperand(0)); - Instruction *RHS = dyn_cast<Instruction>(inst->getOperand(1)); - if (!setupBitInsert(LHS, LHSSrc, LHSMask, LHSShift)) { - if (mDebug) { - dbgs() << "Found an OR Operation that failed setup!\n"; - inst->dump(); - if (LHS) { LHS->dump(); } - if (LHSSrc) { LHSSrc->dump(); } - if (LHSMask) { LHSMask->dump(); } - if (LHSShift) { LHSShift->dump(); } - } - // There was an issue with the setup for BitInsert. - return false; - } - if (!setupBitInsert(RHS, RHSSrc, RHSMask, RHSShift)) { - if (mDebug) { - dbgs() << "Found an OR Operation that failed setup!\n"; - inst->dump(); - if (RHS) { RHS->dump(); } - if (RHSSrc) { RHSSrc->dump(); } - if (RHSMask) { RHSMask->dump(); } - if (RHSShift) { RHSShift->dump(); } - } - // There was an issue with the setup for BitInsert. - return false; - } - if (mDebug) { - dbgs() << "Found an OR operation that can possibly be optimized to ubit insert!\n"; - dbgs() << "Op: "; inst->dump(); - dbgs() << "LHS: "; if (LHS) { LHS->dump(); } else { dbgs() << "(None)\n"; } - dbgs() << "LHS Src: "; if (LHSSrc) { LHSSrc->dump(); } else { dbgs() << "(None)\n"; } - dbgs() << "LHS Mask: "; if (LHSMask) { LHSMask->dump(); } else { dbgs() << "(None)\n"; } - dbgs() << "LHS Shift: "; if (LHSShift) { LHSShift->dump(); } else { dbgs() << "(None)\n"; } - dbgs() << "RHS: "; if (RHS) { RHS->dump(); } else { dbgs() << "(None)\n"; } - dbgs() << "RHS Src: "; if (RHSSrc) { RHSSrc->dump(); } else { dbgs() << "(None)\n"; } - dbgs() << "RHS Mask: "; if (RHSMask) { RHSMask->dump(); } else { dbgs() << "(None)\n"; } - dbgs() << "RHS Shift: "; if (RHSShift) { RHSShift->dump(); } else { dbgs() << "(None)\n"; } - } - Constant *offset = NULL; - Constant *width = NULL; - uint32_t lhsMaskVal = 0, rhsMaskVal = 0; - uint32_t lhsShiftVal = 0, rhsShiftVal = 0; - uint32_t lhsMaskWidth = 0, rhsMaskWidth = 0; - uint32_t lhsMaskOffset = 0, rhsMaskOffset = 0; - lhsMaskVal = (LHSMask - ? dyn_cast<ConstantInt>(LHSMask)->getZExtValue() : 0); - rhsMaskVal = (RHSMask - ? dyn_cast<ConstantInt>(RHSMask)->getZExtValue() : 0); - lhsShiftVal = (LHSShift - ?
dyn_cast<ConstantInt>(LHSShift)->getZExtValue() : 0); - rhsShiftVal = (RHSShift - ? dyn_cast<ConstantInt>(RHSShift)->getZExtValue() : 0); - lhsMaskWidth = lhsMaskVal ? CountPopulation_32(lhsMaskVal) : 32 - lhsShiftVal; - rhsMaskWidth = rhsMaskVal ? CountPopulation_32(rhsMaskVal) : 32 - rhsShiftVal; - lhsMaskOffset = lhsMaskVal ? CountTrailingZeros_32(lhsMaskVal) : lhsShiftVal; - rhsMaskOffset = rhsMaskVal ? CountTrailingZeros_32(rhsMaskVal) : rhsShiftVal; - // TODO: Handle the case of A & B | D & ~B(i.e. inverted masks). - if ((lhsMaskVal || rhsMaskVal) && !(lhsMaskVal ^ rhsMaskVal)) { - return false; - } - if (lhsMaskOffset >= (rhsMaskWidth + rhsMaskOffset)) { - offset = ConstantInt::get(aType, lhsMaskOffset, false); - width = ConstantInt::get(aType, lhsMaskWidth, false); - RHSSrc = RHS; - if (!isMask_32(lhsMaskVal) && !isShiftedMask_32(lhsMaskVal)) { - return false; - } - if (!LHSShift) { - LHSSrc = BinaryOperator::Create(Instruction::LShr, LHSSrc, offset, - "MaskShr", LHS); - } else if (lhsShiftVal != lhsMaskOffset) { - LHSSrc = BinaryOperator::Create(Instruction::LShr, LHSSrc, offset, - "MaskShr", LHS); - } - if (mDebug) { - dbgs() << "Optimizing LHS!\n"; - } - } else if (rhsMaskOffset >= (lhsMaskWidth + lhsMaskOffset)) { - offset = ConstantInt::get(aType, rhsMaskOffset, false); - width = ConstantInt::get(aType, rhsMaskWidth, false); - LHSSrc = RHSSrc; - RHSSrc = LHS; - if (!isMask_32(rhsMaskVal) && !isShiftedMask_32(rhsMaskVal)) { - return false; - } - if (!RHSShift) { - LHSSrc = BinaryOperator::Create(Instruction::LShr, LHSSrc, offset, - "MaskShr", RHS); - } else if (rhsShiftVal != rhsMaskOffset) { - LHSSrc = BinaryOperator::Create(Instruction::LShr, LHSSrc, offset, - "MaskShr", RHS); - } - if (mDebug) { - dbgs() << "Optimizing RHS!\n"; - } - } else { - if (mDebug) { - dbgs() << "Failed constraint 3!\n"; - } - return false; - } - if (mDebug) { - dbgs() << "Width: "; if (width) { width->dump(); } else { dbgs() << "(0)\n"; } - dbgs() << "Offset: "; if (offset) { offset->dump(); } else { dbgs() << "(0)\n"; } - dbgs() << "LHSSrc: "; if (LHSSrc) { LHSSrc->dump(); } else { dbgs() << "(0)\n"; } - dbgs() << "RHSSrc: "; if (RHSSrc) { RHSSrc->dump(); } else { dbgs() << "(0)\n"; } - } - if (!offset || !width) { - if (mDebug) { - dbgs() << "Either width or offset are NULL, failed detection!\n"; - } - return false; - } - // Lets create the function signature. 
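// [Editor's note] For reference before the call construction below: the
// intrinsic being emitted is __amdil_ubit_insert, and the Operands[4] array
// that follows is {width, offset, LHSSrc, RHSSrc}. A minimal C++ model of the
// assumed IL semantics (the authoritative definition lives in the ATI IL
// spec, so treat this as a hedged sketch, not the implementation):

#include <cstdint>
static uint32_t ubitInsertModel(uint32_t width, uint32_t offset,
                                uint32_t src, uint32_t dst) {
  uint32_t field = (width >= 32) ? 0xFFFFFFFFu : ((1u << width) - 1u);
  uint32_t mask = field << offset;
  // Clear 'width' bits of dst at 'offset', then insert src there.
  return (dst & ~mask) | ((src << offset) & mask);
}

// E.g. (A & 0xFF0) | (D & ~0xFF0) corresponds to width = 8, offset = 4,
// src = A >> 4 (the LShr "MaskShr" normalization performed above).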
- std::vector<Type *> callTypes; - callTypes.push_back(aType); - callTypes.push_back(aType); - callTypes.push_back(aType); - callTypes.push_back(aType); - FunctionType *funcType = FunctionType::get(aType, callTypes, false); - std::string name = "__amdil_ubit_insert"; - if (isVector) { name += "_v" + itostr(numEle) + "u32"; } else { name += "_u32"; } - Function *Func = - dyn_cast<Function>(inst->getParent()->getParent()->getParent()-> - getOrInsertFunction(StringRef(name), funcType)); - Value *Operands[4] = { - width, - offset, - LHSSrc, - RHSSrc - }; - CallInst *CI = CallInst::Create(Func, Operands, "BitInsertOpt"); - if (mDebug) { - dbgs() << "Old Inst: "; - inst->dump(); - dbgs() << "New Inst: "; - CI->dump(); - dbgs() << "\n\n"; - } - CI->insertBefore(inst); - inst->replaceAllUsesWith(CI); - return true; -} - -bool -AMDGPUPeepholeOpt::optimizeBitExtract(Instruction *inst) { - if (!inst) { - return false; - } - if (!inst->isBinaryOp()) { - return false; - } - if (inst->getOpcode() != Instruction::And) { - return false; - } - if (optLevel == CodeGenOpt::None) { - return false; - } - // We want to do some simple optimizations on Shift right/And patterns. The - // basic optimization is to turn (A >> B) & C where A is a 32bit type, B is a - // value smaller than 32 and C is a mask. If C is a constant value, then the - // following transformation can occur. For signed integers, it turns into the - // function call dst = __amdil_ibit_extract(log2(C), B, A). For unsigned - // integers, it turns into the function call dst = - // __amdil_ubit_extract(log2(C), B, A). The function __amdil_[u|i]bit_extract - // can be found in Section 7.9 of the ATI IL spec of the stream SDK for - // Evergreen hardware. - if (mSTM->device()->getGeneration() == AMDGPUDeviceInfo::HD4XXX) { - // This does not work on HD4XXX hardware. - return false; - } - Type *aType = inst->getType(); - bool isVector = aType->isVectorTy(); - - // XXX Support vector types - if (isVector) { - return false; - } - int numEle = 1; - // This only works on 32bit integers - if (aType->getScalarType() - != Type::getInt32Ty(inst->getContext())) { - return false; - } - if (isVector) { - const VectorType *VT = dyn_cast<VectorType>(aType); - numEle = VT->getNumElements(); - // We currently cannot support more than 4 elements in an intrinsic and we - // cannot support Vec3 types. - if (numEle > 4 || numEle == 3) { - return false; - } - } - BinaryOperator *ShiftInst = dyn_cast<BinaryOperator>(inst->getOperand(0)); - // If the first operand is not a shift instruction, then we can return as it - // doesn't match this pattern. - if (!ShiftInst || !ShiftInst->isShift()) { - return false; - } - // If we are a shift left, then we don't match this pattern. - if (ShiftInst->getOpcode() == Instruction::Shl) { - return false; - } - bool isSigned = ShiftInst->isArithmeticShift(); - Constant *AndMask = dyn_cast<Constant>(inst->getOperand(1)); - Constant *ShrVal = dyn_cast<Constant>(ShiftInst->getOperand(1)); - // Let's make sure that the shift value and the and mask are constant integers.
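// [Editor's note] Worked example of the mask test used below: isMask_32
// requires a contiguous run of low bits, so C = 0x00FF passes (width =
// CountTrailingOnes_32(0x00FF) = 8) while C = 0x0F0F fails and the
// (A >> B) & 0x0F0F form is left untouched. A pair such as width = 8,
// shift = 28 is also rejected, since 8 > 32 - 28.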
- if (!AndMask || !ShrVal) { - return false; - } - Constant *newMaskConst; - Constant *shiftValConst; - if (isVector) { - // Handle the vector case - std::vector<Constant *> maskVals; - std::vector<Constant *> shiftVals; - ConstantVector *AndMaskVec = dyn_cast<ConstantVector>(AndMask); - ConstantVector *ShrValVec = dyn_cast<ConstantVector>(ShrVal); - Type *scalarType = AndMaskVec->getType()->getScalarType(); - assert(AndMaskVec->getNumOperands() == - ShrValVec->getNumOperands() && "cannot have a " - "combination where the number of elements to a " - "shift and an and are different!"); - for (size_t x = 0, y = AndMaskVec->getNumOperands(); x < y; ++x) { - ConstantInt *AndCI = dyn_cast<ConstantInt>(AndMaskVec->getOperand(x)); - ConstantInt *ShiftIC = dyn_cast<ConstantInt>(ShrValVec->getOperand(x)); - if (!AndCI || !ShiftIC) { - return false; - } - uint32_t maskVal = (uint32_t)AndCI->getZExtValue(); - if (!isMask_32(maskVal)) { - return false; - } - maskVal = (uint32_t)CountTrailingOnes_32(maskVal); - uint32_t shiftVal = (uint32_t)ShiftIC->getZExtValue(); - // If the mask or shiftval is greater than the bitcount, then break out. - if (maskVal >= 32 || shiftVal >= 32) { - return false; - } - // If the mask val is greater than the number of original bits left - // then this optimization is invalid. - if (maskVal > (32 - shiftVal)) { - return false; - } - maskVals.push_back(ConstantInt::get(scalarType, maskVal, isSigned)); - shiftVals.push_back(ConstantInt::get(scalarType, shiftVal, isSigned)); - } - newMaskConst = ConstantVector::get(maskVals); - shiftValConst = ConstantVector::get(shiftVals); - } else { - // Handle the scalar case - uint32_t maskVal = (uint32_t)dyn_cast<ConstantInt>(AndMask)->getZExtValue(); - // This must be a mask value where all lower bits are set to 1 and then any - // bit higher is set to 0. - if (!isMask_32(maskVal)) { - return false; - } - maskVal = (uint32_t)CountTrailingOnes_32(maskVal); - // Count the number of bits set in the mask; this is the width of the - // resulting bit set that is extracted from the source value. - uint32_t shiftVal = (uint32_t)dyn_cast<ConstantInt>(ShrVal)->getZExtValue(); - // If the mask or shift val is greater than the bitcount, then break out. - if (maskVal >= 32 || shiftVal >= 32) { - return false; - } - // If the mask val is greater than the number of original bits left then - // this optimization is invalid. - if (maskVal > (32 - shiftVal)) { - return false; - } - newMaskConst = ConstantInt::get(aType, maskVal, isSigned); - shiftValConst = ConstantInt::get(aType, shiftVal, isSigned); - } - // Let's create the function signature. - std::vector<Type *> callTypes; - callTypes.push_back(aType); - callTypes.push_back(aType); - callTypes.push_back(aType); - FunctionType *funcType = FunctionType::get(aType, callTypes, false); - std::string name = "llvm.AMDGPU.bit.extract.u32"; - if (isVector) { - name += ".v" + itostr(numEle) + "i32"; - } else { - name += "."; - } - // Let's create the function.
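// [Editor's note] A sketch of what the llvm.AMDGPU.bit.extract.u32 call
// built below is assumed to compute; the argument order matches the
// Operands[3] array that follows (source, shift amount, width). Hedged:
// the intrinsic's exact semantics are defined by the backend, not here.

#include <cstdint>
static uint32_t bitExtractModel(uint32_t src, uint32_t offset, uint32_t width) {
  uint32_t mask = (width >= 32) ? 0xFFFFFFFFu : ((1u << width) - 1u);
  // Equivalent to the original (A >> B) & C when C is a low-bit mask of
  // 'width' ones and B == offset.
  return (src >> offset) & mask;
}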
- Function *Func = - dyn_cast<Function>(inst->getParent()->getParent()->getParent()-> - getOrInsertFunction(StringRef(name), funcType)); - Value *Operands[3] = { - ShiftInst->getOperand(0), - shiftValConst, - newMaskConst - }; - // Lets create the Call with the operands - CallInst *CI = CallInst::Create(Func, Operands, "ByteExtractOpt"); - CI->setDoesNotAccessMemory(); - CI->insertBefore(inst); - inst->replaceAllUsesWith(CI); - return true; -} - -bool -AMDGPUPeepholeOpt::expandBFI(CallInst *CI) { - if (!CI) { - return false; - } - Value *LHS = CI->getOperand(CI->getNumOperands() - 1); - if (!LHS->getName().startswith("__amdil_bfi")) { - return false; - } - Type* type = CI->getOperand(0)->getType(); - Constant *negOneConst = NULL; - if (type->isVectorTy()) { - std::vector<Constant *> negOneVals; - negOneConst = ConstantInt::get(CI->getContext(), - APInt(32, StringRef("-1"), 10)); - for (size_t x = 0, - y = dyn_cast<VectorType>(type)->getNumElements(); x < y; ++x) { - negOneVals.push_back(negOneConst); - } - negOneConst = ConstantVector::get(negOneVals); - } else { - negOneConst = ConstantInt::get(CI->getContext(), - APInt(32, StringRef("-1"), 10)); - } - // __amdil_bfi => (A & B) | (~A & C) - BinaryOperator *lhs = - BinaryOperator::Create(Instruction::And, CI->getOperand(0), - CI->getOperand(1), "bfi_and", CI); - BinaryOperator *rhs = - BinaryOperator::Create(Instruction::Xor, CI->getOperand(0), negOneConst, - "bfi_not", CI); - rhs = BinaryOperator::Create(Instruction::And, rhs, CI->getOperand(2), - "bfi_and", CI); - lhs = BinaryOperator::Create(Instruction::Or, lhs, rhs, "bfi_or", CI); - CI->replaceAllUsesWith(lhs); - return true; -} - -bool -AMDGPUPeepholeOpt::expandBFM(CallInst *CI) { - if (!CI) { - return false; - } - Value *LHS = CI->getOperand(CI->getNumOperands() - 1); - if (!LHS->getName().startswith("__amdil_bfm")) { - return false; - } - // __amdil_bfm => ((1 << (src0 & 0x1F)) - 1) << (src1 & 0x1f) - Constant *newMaskConst = NULL; - Constant *newShiftConst = NULL; - Type* type = CI->getOperand(0)->getType(); - if (type->isVectorTy()) { - std::vector<Constant*> newMaskVals, newShiftVals; - newMaskConst = ConstantInt::get(Type::getInt32Ty(*mCTX), 0x1F); - newShiftConst = ConstantInt::get(Type::getInt32Ty(*mCTX), 1); - for (size_t x = 0, - y = dyn_cast<VectorType>(type)->getNumElements(); x < y; ++x) { - newMaskVals.push_back(newMaskConst); - newShiftVals.push_back(newShiftConst); - } - newMaskConst = ConstantVector::get(newMaskVals); - newShiftConst = ConstantVector::get(newShiftVals); - } else { - newMaskConst = ConstantInt::get(Type::getInt32Ty(*mCTX), 0x1F); - newShiftConst = ConstantInt::get(Type::getInt32Ty(*mCTX), 1); - } - BinaryOperator *lhs = - BinaryOperator::Create(Instruction::And, CI->getOperand(0), - newMaskConst, "bfm_mask", CI); - lhs = BinaryOperator::Create(Instruction::Shl, newShiftConst, - lhs, "bfm_shl", CI); - lhs = BinaryOperator::Create(Instruction::Sub, lhs, - newShiftConst, "bfm_sub", CI); - BinaryOperator *rhs = - BinaryOperator::Create(Instruction::And, CI->getOperand(1), - newMaskConst, "bfm_mask", CI); - lhs = BinaryOperator::Create(Instruction::Shl, lhs, rhs, "bfm_shl", CI); - CI->replaceAllUsesWith(lhs); - return true; -} - -bool -AMDGPUPeepholeOpt::instLevelOptimizations(BasicBlock::iterator *bbb) { - Instruction *inst = (*bbb); - if (optimizeCallInst(bbb)) { - return true; - } - if (optimizeBitExtract(inst)) { - return false; - } - if (optimizeBitInsert(inst)) { - return false; - } - if (correctMisalignedMemOp(inst)) { - return false; - } - return 
false; -} -bool -AMDGPUPeepholeOpt::correctMisalignedMemOp(Instruction *inst) { - LoadInst *linst = dyn_cast<LoadInst>(inst); - StoreInst *sinst = dyn_cast<StoreInst>(inst); - unsigned alignment; - Type* Ty = inst->getType(); - if (linst) { - alignment = linst->getAlignment(); - Ty = inst->getType(); - } else if (sinst) { - alignment = sinst->getAlignment(); - Ty = sinst->getValueOperand()->getType(); - } else { - return false; - } - unsigned size = getTypeSize(Ty); - if (size == alignment || size < alignment) { - return false; - } - if (!Ty->isStructTy()) { - return false; - } - if (alignment < 4) { - if (linst) { - linst->setAlignment(0); - return true; - } else if (sinst) { - sinst->setAlignment(0); - return true; - } - } - return false; -} -bool -AMDGPUPeepholeOpt::isSigned24BitOps(CallInst *CI) { - if (!CI) { - return false; - } - Value *LHS = CI->getOperand(CI->getNumOperands() - 1); - std::string namePrefix = LHS->getName().substr(0, 14); - if (namePrefix != "__amdil_imad24" && namePrefix != "__amdil_imul24" - && namePrefix != "__amdil__imul24_high") { - return false; - } - if (mSTM->device()->usesHardware(AMDGPUDeviceInfo::Signed24BitOps)) { - return false; - } - return true; -} - -void -AMDGPUPeepholeOpt::expandSigned24BitOps(CallInst *CI) { - assert(isSigned24BitOps(CI) && "Must be a " - "signed 24 bit operation to call this function!"); - Value *LHS = CI->getOperand(CI->getNumOperands()-1); - // On 7XX and 8XX we do not have signed 24bit, so we need to - // expand it to the following: - // imul24 turns into 32bit imul - // imad24 turns into 32bit imad - // imul24_high turns into 32bit imulhigh - if (LHS->getName().substr(0, 14) == "__amdil_imad24") { - Type *aType = CI->getOperand(0)->getType(); - bool isVector = aType->isVectorTy(); - int numEle = isVector ? dyn_cast<VectorType>(aType)->getNumElements() : 1; - std::vector<Type*> callTypes; - callTypes.push_back(CI->getOperand(0)->getType()); - callTypes.push_back(CI->getOperand(1)->getType()); - callTypes.push_back(CI->getOperand(2)->getType()); - FunctionType *funcType = - FunctionType::get(CI->getOperand(0)->getType(), callTypes, false); - std::string name = "__amdil_imad"; - if (isVector) { - name += "_v" + itostr(numEle) + "i32"; - } else { - name += "_i32"; - } - Function *Func = dyn_cast<Function>( - CI->getParent()->getParent()->getParent()-> - getOrInsertFunction(StringRef(name), funcType)); - Value *Operands[3] = { - CI->getOperand(0), - CI->getOperand(1), - CI->getOperand(2) - }; - CallInst *nCI = CallInst::Create(Func, Operands, "imad24"); - nCI->insertBefore(CI); - CI->replaceAllUsesWith(nCI); - } else if (LHS->getName().substr(0, 14) == "__amdil_imul24") { - BinaryOperator *mulOp = - BinaryOperator::Create(Instruction::Mul, CI->getOperand(0), - CI->getOperand(1), "imul24", CI); - CI->replaceAllUsesWith(mulOp); - } else if (LHS->getName().substr(0, 19) == "__amdil_imul24_high") { - Type *aType = CI->getOperand(0)->getType(); - - bool isVector = aType->isVectorTy(); - int numEle = isVector ? 
dyn_cast<VectorType>(aType)->getNumElements() : 1; - std::vector<Type*> callTypes; - callTypes.push_back(CI->getOperand(0)->getType()); - callTypes.push_back(CI->getOperand(1)->getType()); - FunctionType *funcType = - FunctionType::get(CI->getOperand(0)->getType(), callTypes, false); - std::string name = "__amdil_imul_high"; - if (isVector) { - name += "_v" + itostr(numEle) + "i32"; - } else { - name += "_i32"; - } - Function *Func = dyn_cast<Function>( - CI->getParent()->getParent()->getParent()-> - getOrInsertFunction(StringRef(name), funcType)); - Value *Operands[2] = { - CI->getOperand(0), - CI->getOperand(1) - }; - CallInst *nCI = CallInst::Create(Func, Operands, "imul24_high"); - nCI->insertBefore(CI); - CI->replaceAllUsesWith(nCI); - } -} - -bool -AMDGPUPeepholeOpt::isRWGLocalOpt(CallInst *CI) { - return (CI != NULL - && CI->getOperand(CI->getNumOperands() - 1)->getName() - == "__amdil_get_local_size_int"); -} - -bool -AMDGPUPeepholeOpt::convertAccurateDivide(CallInst *CI) { - if (!CI) { - return false; - } - if (mSTM->device()->getGeneration() == AMDGPUDeviceInfo::HD6XXX - && (mSTM->getDeviceName() == "cayman")) { - return false; - } - return CI->getOperand(CI->getNumOperands() - 1)->getName().substr(0, 20) - == "__amdil_improved_div"; -} - -void -AMDGPUPeepholeOpt::expandAccurateDivide(CallInst *CI) { - assert(convertAccurateDivide(CI) - && "expanding accurate divide can only happen if it is expandable!"); - BinaryOperator *divOp = - BinaryOperator::Create(Instruction::FDiv, CI->getOperand(0), - CI->getOperand(1), "fdiv32", CI); - CI->replaceAllUsesWith(divOp); -} - -bool -AMDGPUPeepholeOpt::propagateSamplerInst(CallInst *CI) { - if (optLevel != CodeGenOpt::None) { - return false; - } - - if (!CI) { - return false; - } - - unsigned funcNameIdx = 0; - funcNameIdx = CI->getNumOperands() - 1; - StringRef calleeName = CI->getOperand(funcNameIdx)->getName(); - if (calleeName != "__amdil_image2d_read_norm" - && calleeName != "__amdil_image2d_read_unnorm" - && calleeName != "__amdil_image3d_read_norm" - && calleeName != "__amdil_image3d_read_unnorm") { - return false; - } - - unsigned samplerIdx = 2; - samplerIdx = 1; - Value *sampler = CI->getOperand(samplerIdx); - LoadInst *lInst = dyn_cast<LoadInst>(sampler); - if (!lInst) { - return false; - } - - if (lInst->getPointerAddressSpace() != AMDGPUAS::PRIVATE_ADDRESS) { - return false; - } - - GlobalVariable *gv = dyn_cast<GlobalVariable>(lInst->getPointerOperand()); - // If we are loading from what is not a global value, then we - // fail and return. - if (!gv) { - return false; - } - - // If we don't have an initializer or we have an initializer and - // the initializer is not a 32bit integer, we fail. - if (!gv->hasInitializer() - || !gv->getInitializer()->getType()->isIntegerTy(32)) { - return false; - } - - // Now that we have the global variable initializer, lets replace - // all uses of the load instruction with the samplerVal and - // reparse the __amdil_is_constant() function. 
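// [Editor's note] Illustrative before/after for the propagation performed
// below (the IR names are hypothetical):
//   %s = load i32* @sampler   ; private global with constant initializer K
//   call @__amdil_image2d_read_norm(..., %s, ...)
// becomes a use of the constant K itself, which is what later allows
// __amdil_is_constant() queries on the sampler to fold to 1.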
- Constant *samplerVal = gv->getInitializer(); - lInst->replaceAllUsesWith(samplerVal); - return true; -} - -bool -AMDGPUPeepholeOpt::doInitialization(Module &M) { - return false; -} - -bool -AMDGPUPeepholeOpt::doFinalization(Module &M) { - return false; -} - -void -AMDGPUPeepholeOpt::getAnalysisUsage(AnalysisUsage &AU) const { - AU.addRequired<MachineFunctionAnalysis>(); - FunctionPass::getAnalysisUsage(AU); - AU.setPreservesAll(); -} - -size_t AMDGPUPeepholeOpt::getTypeSize(Type * const T, bool dereferencePtr) { - size_t size = 0; - if (!T) { - return size; - } - switch (T->getTypeID()) { - case Type::X86_FP80TyID: - case Type::FP128TyID: - case Type::PPC_FP128TyID: - case Type::LabelTyID: - assert(0 && "These types are not supported by this backend"); - default: - case Type::FloatTyID: - case Type::DoubleTyID: - size = T->getPrimitiveSizeInBits() >> 3; - break; - case Type::PointerTyID: - size = getTypeSize(dyn_cast<PointerType>(T), dereferencePtr); - break; - case Type::IntegerTyID: - size = getTypeSize(dyn_cast<IntegerType>(T), dereferencePtr); - break; - case Type::StructTyID: - size = getTypeSize(dyn_cast<StructType>(T), dereferencePtr); - break; - case Type::ArrayTyID: - size = getTypeSize(dyn_cast<ArrayType>(T), dereferencePtr); - break; - case Type::FunctionTyID: - size = getTypeSize(dyn_cast<FunctionType>(T), dereferencePtr); - break; - case Type::VectorTyID: - size = getTypeSize(dyn_cast<VectorType>(T), dereferencePtr); - break; - }; - return size; -} - -size_t AMDGPUPeepholeOpt::getTypeSize(StructType * const ST, - bool dereferencePtr) { - size_t size = 0; - if (!ST) { - return size; - } - Type *curType; - StructType::element_iterator eib; - StructType::element_iterator eie; - for (eib = ST->element_begin(), eie = ST->element_end(); eib != eie; ++eib) { - curType = *eib; - size += getTypeSize(curType, dereferencePtr); - } - return size; -} - -size_t AMDGPUPeepholeOpt::getTypeSize(IntegerType * const IT, - bool dereferencePtr) { - return IT ? (IT->getBitWidth() >> 3) : 0; -} - -size_t AMDGPUPeepholeOpt::getTypeSize(FunctionType * const FT, - bool dereferencePtr) { - assert(0 && "Should not be able to calculate the size of an function type"); - return 0; -} - -size_t AMDGPUPeepholeOpt::getTypeSize(ArrayType * const AT, - bool dereferencePtr) { - return (size_t)(AT ? (getTypeSize(AT->getElementType(), - dereferencePtr) * AT->getNumElements()) - : 0); -} - -size_t AMDGPUPeepholeOpt::getTypeSize(VectorType * const VT, - bool dereferencePtr) { - return VT ? 
(VT->getBitWidth() >> 3) : 0; -} - -size_t AMDGPUPeepholeOpt::getTypeSize(PointerType * const PT, - bool dereferencePtr) { - if (!PT) { - return 0; - } - Type *CT = PT->getElementType(); - if (CT->getTypeID() == Type::StructTyID && - PT->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS) { - return getTypeSize(dyn_cast<StructType>(CT)); - } else if (dereferencePtr) { - size_t size = 0; - for (size_t x = 0, y = PT->getNumContainedTypes(); x < y; ++x) { - size += getTypeSize(PT->getContainedType(x), dereferencePtr); - } - return size; - } else { - return 4; - } -} - -size_t AMDGPUPeepholeOpt::getTypeSize(OpaqueType * const OT, - bool dereferencePtr) { - //assert(0 && "Should not be able to calculate the size of an opaque type"); - return 4; -} diff --git a/lib/Target/R600/AMDILSIDevice.cpp b/lib/Target/R600/AMDILSIDevice.cpp deleted file mode 100644 index 0d1de3d..0000000 --- a/lib/Target/R600/AMDILSIDevice.cpp +++ /dev/null @@ -1,48 +0,0 @@ -//===-- AMDILSIDevice.cpp - Device Info for Southern Islands GPUs ---------===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -/// \file -//==-----------------------------------------------------------------------===// -#include "AMDILSIDevice.h" -#include "AMDGPUSubtarget.h" -#include "AMDILEvergreenDevice.h" -#include "AMDILNIDevice.h" - -using namespace llvm; - -AMDGPUSIDevice::AMDGPUSIDevice(AMDGPUSubtarget *ST) - : AMDGPUEvergreenDevice(ST) { -} -AMDGPUSIDevice::~AMDGPUSIDevice() { -} - -size_t -AMDGPUSIDevice::getMaxLDSSize() const { - if (usesHardware(AMDGPUDeviceInfo::LocalMem)) { - return MAX_LDS_SIZE_900; - } else { - return 0; - } -} - -uint32_t -AMDGPUSIDevice::getGeneration() const { - return AMDGPUDeviceInfo::HD7XXX; -} - -std::string -AMDGPUSIDevice::getDataLayout() const { - return std::string( - "e" - "-p:64:64:64" - "-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64" - "-v16:16:16-v24:32:32-v32:32:32-v48:64:64-v64:64:64-v96:128:128" - "-v128:128:128-v192:256:256-v256:256:256-v512:512:512-v1024:1024:1024" - "-v2048:2048:2048" - "-n32:64" - ); -} diff --git a/lib/Target/R600/AMDILSIDevice.h b/lib/Target/R600/AMDILSIDevice.h deleted file mode 100644 index 5b2cb25..0000000 --- a/lib/Target/R600/AMDILSIDevice.h +++ /dev/null @@ -1,39 +0,0 @@ -//===------- AMDILSIDevice.h - Define SI Device for AMDIL -*- C++ -*------===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//==-----------------------------------------------------------------------===// -// -/// \file -/// \brief Interface for the subtarget data classes. -/// -/// This file will define the interface that each generation needs to -/// implement in order to correctly answer queries on the capabilities of the -/// specific hardware. -//===---------------------------------------------------------------------===// -#ifndef AMDILSIDEVICE_H -#define AMDILSIDEVICE_H -#include "AMDILEvergreenDevice.h" - -namespace llvm { -class AMDGPUSubtarget; -//===---------------------------------------------------------------------===// -// SI generation of devices and their respective sub classes -//===---------------------------------------------------------------------===// - -/// \brief The AMDGPUSIDevice is the base class for all Southern Island series -/// of cards. 
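// [Editor's note] On the data-layout string removed with AMDILSIDevice.cpp
// above: it decodes as "e" = little-endian, "p:64:64:64" = 64-bit pointers
// with 64-bit ABI and preferred alignment, "iN:A:P"/"vN:A:P" = per-type
// sizes and alignments, and "n32:64" = native integer widths. After this
// change the same facts are presumably carried by subtarget features such
// as FeatureSouthernIslands (see the Processors.td hunk further down).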
-class AMDGPUSIDevice : public AMDGPUEvergreenDevice { -public: - AMDGPUSIDevice(AMDGPUSubtarget*); - virtual ~AMDGPUSIDevice(); - virtual size_t getMaxLDSSize() const; - virtual uint32_t getGeneration() const; - virtual std::string getDataLayout() const; -}; - -} // namespace llvm -#endif // AMDILSIDEVICE_H diff --git a/lib/Target/R600/CMakeLists.txt b/lib/Target/R600/CMakeLists.txt index 2ad2047..1b79bf5 100644 --- a/lib/Target/R600/CMakeLists.txt +++ b/lib/Target/R600/CMakeLists.txt @@ -12,17 +12,10 @@ tablegen(LLVM AMDGPUGenAsmWriter.inc -gen-asm-writer) add_public_tablegen_target(AMDGPUCommonTableGen) add_llvm_target(R600CodeGen - AMDIL7XXDevice.cpp AMDILCFGStructurizer.cpp - AMDILDevice.cpp - AMDILDeviceInfo.cpp - AMDILEvergreenDevice.cpp AMDILIntrinsicInfo.cpp AMDILISelDAGToDAG.cpp AMDILISelLowering.cpp - AMDILNIDevice.cpp - AMDILPeepholeOptimizer.cpp - AMDILSIDevice.cpp AMDGPUAsmPrinter.cpp AMDGPUFrameLowering.cpp AMDGPUIndirectAddressing.cpp @@ -42,8 +35,10 @@ add_llvm_target(R600CodeGen R600ISelLowering.cpp R600MachineFunctionInfo.cpp R600MachineScheduler.cpp + R600OptimizeVectorRegisters.cpp R600Packetizer.cpp R600RegisterInfo.cpp + R600TextureIntrinsicsReplacer.cpp SIAnnotateControlFlow.cpp SIInsertWaits.cpp SIInstrInfo.cpp diff --git a/lib/Target/R600/InstPrinter/AMDGPUInstPrinter.cpp b/lib/Target/R600/InstPrinter/AMDGPUInstPrinter.cpp index 10547a5..8c814e0 100644 --- a/lib/Target/R600/InstPrinter/AMDGPUInstPrinter.cpp +++ b/lib/Target/R600/InstPrinter/AMDGPUInstPrinter.cpp @@ -10,13 +10,14 @@ #include "AMDGPUInstPrinter.h" #include "MCTargetDesc/AMDGPUMCTargetDesc.h" -#include "llvm/MC/MCInst.h" #include "llvm/MC/MCExpr.h" +#include "llvm/MC/MCInst.h" using namespace llvm; void AMDGPUInstPrinter::printInst(const MCInst *MI, raw_ostream &OS, StringRef Annot) { + OS.flush(); printInstruction(MI, OS); printAnnotation(OS, Annot); @@ -67,11 +68,14 @@ void AMDGPUInstPrinter::printMemOperand(const MCInst *MI, unsigned OpNo, } void AMDGPUInstPrinter::printIfSet(const MCInst *MI, unsigned OpNo, - raw_ostream &O, StringRef Asm) { + raw_ostream &O, StringRef Asm, + StringRef Default) { const MCOperand &Op = MI->getOperand(OpNo); assert(Op.isImm()); if (Op.getImm() == 1) { O << Asm; + } else { + O << Default; } } @@ -98,7 +102,7 @@ void AMDGPUInstPrinter::printLiteral(const MCInst *MI, unsigned OpNo, void AMDGPUInstPrinter::printLast(const MCInst *MI, unsigned OpNo, raw_ostream &O) { - printIfSet(MI, OpNo, O, " *"); + printIfSet(MI, OpNo, O.indent(25 - O.GetNumBytesInBuffer()), "*", " "); } void AMDGPUInstPrinter::printNeg(const MCInst *MI, unsigned OpNo, @@ -169,4 +173,86 @@ void AMDGPUInstPrinter::printSel(const MCInst *MI, unsigned OpNo, O << "." 
<< chans[chan]; } +void AMDGPUInstPrinter::printBankSwizzle(const MCInst *MI, unsigned OpNo, + raw_ostream &O) { + int BankSwizzle = MI->getOperand(OpNo).getImm(); + switch (BankSwizzle) { + case 1: + O << "BS:VEC_021"; + break; + case 2: + O << "BS:VEC_120"; + break; + case 3: + O << "BS:VEC_102"; + break; + case 4: + O << "BS:VEC_201"; + break; + case 5: + O << "BS:VEC_210"; + break; + default: + break; + } + return; +} + +void AMDGPUInstPrinter::printRSel(const MCInst *MI, unsigned OpNo, + raw_ostream &O) { + unsigned Sel = MI->getOperand(OpNo).getImm(); + switch (Sel) { + case 0: + O << "X"; + break; + case 1: + O << "Y"; + break; + case 2: + O << "Z"; + break; + case 3: + O << "W"; + break; + case 4: + O << "0"; + break; + case 5: + O << "1"; + break; + case 7: + O << "_"; + break; + default: + break; + } +} + +void AMDGPUInstPrinter::printCT(const MCInst *MI, unsigned OpNo, + raw_ostream &O) { + unsigned CT = MI->getOperand(OpNo).getImm(); + switch (CT) { + case 0: + O << "U"; + break; + case 1: + O << "N"; + break; + default: + break; + } +} + +void AMDGPUInstPrinter::printKCache(const MCInst *MI, unsigned OpNo, + raw_ostream &O) { + int KCacheMode = MI->getOperand(OpNo).getImm(); + if (KCacheMode > 0) { + int KCacheBank = MI->getOperand(OpNo - 2).getImm(); + O << "CB" << KCacheBank <<":"; + int KCacheAddr = MI->getOperand(OpNo + 2).getImm(); + int LineSize = (KCacheMode == 1)?16:32; + O << KCacheAddr * 16 << "-" << KCacheAddr * 16 + LineSize; + } +} + #include "AMDGPUGenAsmWriter.inc" diff --git a/lib/Target/R600/InstPrinter/AMDGPUInstPrinter.h b/lib/Target/R600/InstPrinter/AMDGPUInstPrinter.h index 767a708..4c1dfa6 100644 --- a/lib/Target/R600/InstPrinter/AMDGPUInstPrinter.h +++ b/lib/Target/R600/InstPrinter/AMDGPUInstPrinter.h @@ -35,7 +35,8 @@ private: void printOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O); void printInterpSlot(const MCInst *MI, unsigned OpNum, raw_ostream &O); void printMemOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O); - void printIfSet(const MCInst *MI, unsigned OpNo, raw_ostream &O, StringRef Asm); + void printIfSet(const MCInst *MI, unsigned OpNo, raw_ostream &O, + StringRef Asm, StringRef Default = ""); void printAbs(const MCInst *MI, unsigned OpNo, raw_ostream &O); void printClamp(const MCInst *MI, unsigned OpNo, raw_ostream &O); void printLiteral(const MCInst *MI, unsigned OpNo, raw_ostream &O); @@ -47,6 +48,10 @@ private: void printUpdatePred(const MCInst *MI, unsigned OpNo, raw_ostream &O); void printWrite(const MCInst *MI, unsigned OpNo, raw_ostream &O); void printSel(const MCInst *MI, unsigned OpNo, raw_ostream &O); + void printBankSwizzle(const MCInst *MI, unsigned OpNo, raw_ostream &O); + void printRSel(const MCInst *MI, unsigned OpNo, raw_ostream &O); + void printCT(const MCInst *MI, unsigned OpNo, raw_ostream &O); + void printKCache(const MCInst *MI, unsigned OpNo, raw_ostream &O); }; } // End namespace llvm diff --git a/lib/Target/R600/MCTargetDesc/AMDGPUAsmBackend.cpp b/lib/Target/R600/MCTargetDesc/AMDGPUAsmBackend.cpp index a3397f3..9a36903 100644 --- a/lib/Target/R600/MCTargetDesc/AMDGPUAsmBackend.cpp +++ b/lib/Target/R600/MCTargetDesc/AMDGPUAsmBackend.cpp @@ -82,6 +82,8 @@ void AMDGPUAsmBackend::applyFixup(const MCFixup &Fixup, char *Data, // ELFAMDGPUAsmBackend class //===----------------------------------------------------------------------===// +namespace { + class ELFAMDGPUAsmBackend : public AMDGPUAsmBackend { public: ELFAMDGPUAsmBackend(const Target &T) : AMDGPUAsmBackend(T) { } @@ -91,6 +93,8 @@ public: } }; +} 
// end anonymous namespace + MCAsmBackend *llvm::createAMDGPUAsmBackend(const Target &T, StringRef TT, StringRef CPU) { return new ELFAMDGPUAsmBackend(T); diff --git a/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.cpp b/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.cpp index 2aae26a..f1c44df 100644 --- a/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.cpp +++ b/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.cpp @@ -11,7 +11,7 @@ #include "AMDGPUMCAsmInfo.h" using namespace llvm; -AMDGPUMCAsmInfo::AMDGPUMCAsmInfo(const Target &T, StringRef &TT) : MCAsmInfo() { +AMDGPUMCAsmInfo::AMDGPUMCAsmInfo(StringRef &TT) : MCAsmInfo() { HasSingleParameterDotFile = false; WeakDefDirective = 0; //===------------------------------------------------------------------===// diff --git a/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.h b/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.h index 3ad0fa6..485167b 100644 --- a/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.h +++ b/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.h @@ -17,12 +17,11 @@ #include "llvm/MC/MCAsmInfo.h" namespace llvm { -class Target; class StringRef; class AMDGPUMCAsmInfo : public MCAsmInfo { public: - explicit AMDGPUMCAsmInfo(const Target &T, StringRef &TT); + explicit AMDGPUMCAsmInfo(StringRef &TT); const char* getDataASDirective(unsigned int Size, unsigned int AS) const; const MCSection* getNonexecutableStackSection(MCContext &CTX) const; }; diff --git a/lib/Target/R600/MCTargetDesc/AMDGPUMCTargetDesc.cpp b/lib/Target/R600/MCTargetDesc/AMDGPUMCTargetDesc.cpp index 45d009c..61d70bb 100644 --- a/lib/Target/R600/MCTargetDesc/AMDGPUMCTargetDesc.cpp +++ b/lib/Target/R600/MCTargetDesc/AMDGPUMCTargetDesc.cpp @@ -78,7 +78,7 @@ static MCCodeEmitter *createAMDGPUMCCodeEmitter(const MCInstrInfo &MCII, if (STI.getFeatureBits() & AMDGPU::Feature64BitPtr) { return createSIMCCodeEmitter(MCII, MRI, STI, Ctx); } else { - return createR600MCCodeEmitter(MCII, MRI, STI, Ctx); + return createR600MCCodeEmitter(MCII, MRI, STI); } } diff --git a/lib/Target/R600/MCTargetDesc/AMDGPUMCTargetDesc.h b/lib/Target/R600/MCTargetDesc/AMDGPUMCTargetDesc.h index 09d0d5b..abb0320 100644 --- a/lib/Target/R600/MCTargetDesc/AMDGPUMCTargetDesc.h +++ b/lib/Target/R600/MCTargetDesc/AMDGPUMCTargetDesc.h @@ -33,8 +33,7 @@ extern Target TheAMDGPUTarget; MCCodeEmitter *createR600MCCodeEmitter(const MCInstrInfo &MCII, const MCRegisterInfo &MRI, - const MCSubtargetInfo &STI, - MCContext &Ctx); + const MCSubtargetInfo &STI); MCCodeEmitter *createSIMCCodeEmitter(const MCInstrInfo &MCII, const MCRegisterInfo &MRI, diff --git a/lib/Target/R600/MCTargetDesc/R600MCCodeEmitter.cpp b/lib/Target/R600/MCTargetDesc/R600MCCodeEmitter.cpp index 7c83d86..4d6c25c 100644 --- a/lib/Target/R600/MCTargetDesc/R600MCCodeEmitter.cpp +++ b/lib/Target/R600/MCTargetDesc/R600MCCodeEmitter.cpp @@ -9,12 +9,8 @@ // /// \file /// -/// This code emitter outputs bytecode that is understood by the r600g driver -/// in the Mesa [1] project. The bytecode is very similar to the hardware's ISA, -/// but it still needs to be run through a finalizer in order to be executed -/// by the GPU. -/// -/// [1] http://www.mesa3d.org/ +/// \brief The R600 code emitter produces machine code that can be executed +/// directly on the GPU device. 
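// [Editor's note, re printKCache in the InstPrinter hunk above] The window
// arithmetic it prints, modeled in isolation. Dword units and the one-vs-two
// line interpretation of the mode are assumptions inferred from the *16
// scaling; treat this as a sketch:

#include <cstdint>
#include <cstdio>
static void printKCacheWindow(int Bank, int Mode, int Addr) {
  int LineSize = (Mode == 1) ? 16 : 32;  // one or two 16-dword lines locked
  int Lo = Addr * 16;
  std::printf("CB%d:%d-%d\n", Bank, Lo, Lo + LineSize);  // e.g. CB1:32-48
}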
// //===----------------------------------------------------------------------===// @@ -30,9 +26,6 @@ #include "llvm/Support/raw_ostream.h" #include <stdio.h> -#define SRC_BYTE_COUNT 11 -#define DST_BYTE_COUNT 5 - using namespace llvm; namespace { @@ -43,13 +36,12 @@ class R600MCCodeEmitter : public AMDGPUMCCodeEmitter { const MCInstrInfo &MCII; const MCRegisterInfo &MRI; const MCSubtargetInfo &STI; - MCContext &Ctx; public: R600MCCodeEmitter(const MCInstrInfo &mcii, const MCRegisterInfo &mri, - const MCSubtargetInfo &sti, MCContext &ctx) - : MCII(mcii), MRI(mri), STI(sti), Ctx(ctx) { } + const MCSubtargetInfo &sti) + : MCII(mcii), MRI(mri), STI(sti) { } /// \brief Encode the instruction and write it to the OS. virtual void EncodeInstruction(const MCInst &MI, raw_ostream &OS, @@ -60,30 +52,14 @@ public: SmallVectorImpl<MCFixup> &Fixups) const; private: - void EmitALUInstr(const MCInst &MI, SmallVectorImpl<MCFixup> &Fixups, - raw_ostream &OS) const; - void EmitSrc(const MCInst &MI, unsigned OpIdx, raw_ostream &OS) const; - void EmitSrcISA(const MCInst &MI, unsigned RegOpIdx, unsigned SelOpIdx, - raw_ostream &OS) const; - void EmitDst(const MCInst &MI, raw_ostream &OS) const; - void EmitFCInstr(const MCInst &MI, raw_ostream &OS) const; - - void EmitNullBytes(unsigned int byteCount, raw_ostream &OS) const; - void EmitByte(unsigned int byte, raw_ostream &OS) const; - void EmitTwoBytes(uint32_t bytes, raw_ostream &OS) const; - void Emit(uint32_t value, raw_ostream &OS) const; void Emit(uint64_t value, raw_ostream &OS) const; unsigned getHWRegChan(unsigned reg) const; unsigned getHWReg(unsigned regNo) const; - bool isFCOp(unsigned opcode) const; - bool isTexOp(unsigned opcode) const; - bool isFlagSet(const MCInst &MI, unsigned Operand, unsigned Flag) const; - }; } // End anonymous namespace @@ -95,16 +71,6 @@ enum RegElement { ELEMENT_W }; -enum InstrTypes { - INSTR_ALU = 0, - INSTR_TEX, - INSTR_FC, - INSTR_NATIVE, - INSTR_VTX, - INSTR_EXPORT, - INSTR_CFALU -}; - enum FCInstr { FC_IF_PREDICATE = 0, FC_ELSE, @@ -115,386 +81,63 @@ enum FCInstr { FC_CONTINUE }; -enum TextureTypes { - TEXTURE_1D = 1, - TEXTURE_2D, - TEXTURE_3D, - TEXTURE_CUBE, - TEXTURE_RECT, - TEXTURE_SHADOW1D, - TEXTURE_SHADOW2D, - TEXTURE_SHADOWRECT, - TEXTURE_1D_ARRAY, - TEXTURE_2D_ARRAY, - TEXTURE_SHADOW1D_ARRAY, - TEXTURE_SHADOW2D_ARRAY -}; - MCCodeEmitter *llvm::createR600MCCodeEmitter(const MCInstrInfo &MCII, const MCRegisterInfo &MRI, - const MCSubtargetInfo &STI, - MCContext &Ctx) { - return new R600MCCodeEmitter(MCII, MRI, STI, Ctx); + const MCSubtargetInfo &STI) { + return new R600MCCodeEmitter(MCII, MRI, STI); } void R600MCCodeEmitter::EncodeInstruction(const MCInst &MI, raw_ostream &OS, SmallVectorImpl<MCFixup> &Fixups) const { - if (isFCOp(MI.getOpcode())){ - EmitFCInstr(MI, OS); - } else if (MI.getOpcode() == AMDGPU::RETURN || + const MCInstrDesc &Desc = MCII.get(MI.getOpcode()); + if (MI.getOpcode() == AMDGPU::RETURN || MI.getOpcode() == AMDGPU::FETCH_CLAUSE || MI.getOpcode() == AMDGPU::ALU_CLAUSE || MI.getOpcode() == AMDGPU::BUNDLE || MI.getOpcode() == AMDGPU::KILL) { return; - } else { - switch(MI.getOpcode()) { - case AMDGPU::RAT_WRITE_CACHELESS_32_eg: - case AMDGPU::RAT_WRITE_CACHELESS_128_eg: { - uint64_t inst = getBinaryCodeForInstr(MI, Fixups); - EmitByte(INSTR_NATIVE, OS); - Emit(inst, OS); - break; - } - case AMDGPU::CONSTANT_LOAD_eg: - case AMDGPU::VTX_READ_PARAM_8_eg: - case AMDGPU::VTX_READ_PARAM_16_eg: - case AMDGPU::VTX_READ_PARAM_32_eg: - case AMDGPU::VTX_READ_PARAM_128_eg: - case 
AMDGPU::VTX_READ_GLOBAL_8_eg: - case AMDGPU::VTX_READ_GLOBAL_32_eg: - case AMDGPU::VTX_READ_GLOBAL_128_eg: - case AMDGPU::TEX_VTX_CONSTBUF: - case AMDGPU::TEX_VTX_TEXBUF : { - uint64_t InstWord01 = getBinaryCodeForInstr(MI, Fixups); - uint32_t InstWord2 = MI.getOperand(2).getImm(); // Offset - InstWord2 |= 1 << 19; - - EmitByte(INSTR_NATIVE, OS); - Emit(InstWord01, OS); - EmitByte(INSTR_NATIVE, OS); - Emit(InstWord2, OS); - Emit((u_int32_t) 0, OS); - break; - } - case AMDGPU::TEX_LD: - case AMDGPU::TEX_GET_TEXTURE_RESINFO: - case AMDGPU::TEX_SAMPLE: - case AMDGPU::TEX_SAMPLE_C: - case AMDGPU::TEX_SAMPLE_L: - case AMDGPU::TEX_SAMPLE_C_L: - case AMDGPU::TEX_SAMPLE_LB: - case AMDGPU::TEX_SAMPLE_C_LB: - case AMDGPU::TEX_SAMPLE_G: - case AMDGPU::TEX_SAMPLE_C_G: - case AMDGPU::TEX_GET_GRADIENTS_H: - case AMDGPU::TEX_GET_GRADIENTS_V: - case AMDGPU::TEX_SET_GRADIENTS_H: - case AMDGPU::TEX_SET_GRADIENTS_V: { - unsigned Opcode = MI.getOpcode(); - bool HasOffsets = (Opcode == AMDGPU::TEX_LD); - unsigned OpOffset = HasOffsets ? 3 : 0; - int64_t Sampler = MI.getOperand(OpOffset + 3).getImm(); - int64_t TextureType = MI.getOperand(OpOffset + 4).getImm(); - - uint32_t SrcSelect[4] = {0, 1, 2, 3}; - uint32_t Offsets[3] = {0, 0, 0}; - uint64_t CoordType[4] = {1, 1, 1, 1}; - - if (HasOffsets) - for (unsigned i = 0; i < 3; i++) { - int SignedOffset = MI.getOperand(i + 2).getImm(); - Offsets[i] = (SignedOffset & 0x1F); - } - - - if (TextureType == TEXTURE_RECT || - TextureType == TEXTURE_SHADOWRECT) { - CoordType[ELEMENT_X] = 0; - CoordType[ELEMENT_Y] = 0; - } - - if (TextureType == TEXTURE_1D_ARRAY || - TextureType == TEXTURE_SHADOW1D_ARRAY) { - if (Opcode == AMDGPU::TEX_SAMPLE_C_L || - Opcode == AMDGPU::TEX_SAMPLE_C_LB) { - CoordType[ELEMENT_Y] = 0; - } else { - CoordType[ELEMENT_Z] = 0; - SrcSelect[ELEMENT_Z] = ELEMENT_Y; - } - } else if (TextureType == TEXTURE_2D_ARRAY || - TextureType == TEXTURE_SHADOW2D_ARRAY) { - CoordType[ELEMENT_Z] = 0; - } - - - if ((TextureType == TEXTURE_SHADOW1D || - TextureType == TEXTURE_SHADOW2D || - TextureType == TEXTURE_SHADOWRECT || - TextureType == TEXTURE_SHADOW1D_ARRAY) && - Opcode != AMDGPU::TEX_SAMPLE_C_L && - Opcode != AMDGPU::TEX_SAMPLE_C_LB) { - SrcSelect[ELEMENT_W] = ELEMENT_Z; - } - - uint64_t Word01 = getBinaryCodeForInstr(MI, Fixups) | - CoordType[ELEMENT_X] << 60 | CoordType[ELEMENT_Y] << 61 | - CoordType[ELEMENT_Z] << 62 | CoordType[ELEMENT_W] << 63; + } else if (IS_VTX(Desc)) { + uint64_t InstWord01 = getBinaryCodeForInstr(MI, Fixups); + uint32_t InstWord2 = MI.getOperand(2).getImm(); // Offset + InstWord2 |= 1 << 19; + + Emit(InstWord01, OS); + Emit(InstWord2, OS); + Emit((uint32_t) 0, OS); + } else if (IS_TEX(Desc)) { + int64_t Sampler = MI.getOperand(14).getImm(); + + int64_t SrcSelect[4] = { + MI.getOperand(2).getImm(), + MI.getOperand(3).getImm(), + MI.getOperand(4).getImm(), + MI.getOperand(5).getImm() + }; + int64_t Offsets[3] = { + MI.getOperand(6).getImm() & 0x1F, + MI.getOperand(7).getImm() & 0x1F, + MI.getOperand(8).getImm() & 0x1F + }; + + uint64_t Word01 = getBinaryCodeForInstr(MI, Fixups); uint32_t Word2 = Sampler << 15 | SrcSelect[ELEMENT_X] << 20 | SrcSelect[ELEMENT_Y] << 23 | SrcSelect[ELEMENT_Z] << 26 | SrcSelect[ELEMENT_W] << 29 | Offsets[0] << 0 | Offsets[1] << 5 | Offsets[2] << 10; - EmitByte(INSTR_NATIVE, OS); Emit(Word01, OS); - EmitByte(INSTR_NATIVE, OS); Emit(Word2, OS); - Emit((u_int32_t) 0, OS); - break; - } - case AMDGPU::CF_ALU: - case AMDGPU::CF_ALU_PUSH_BEFORE: { - uint64_t Inst = getBinaryCodeForInstr(MI, Fixups); - 
EmitByte(INSTR_NATIVE, OS); - Emit(Inst, OS); - break; - } - case AMDGPU::CF_CALL_FS_EG: - case AMDGPU::CF_CALL_FS_R600: - return; - case AMDGPU::CF_TC_EG: - case AMDGPU::CF_VC_EG: - case AMDGPU::CF_TC_R600: - case AMDGPU::CF_VC_R600: - case AMDGPU::WHILE_LOOP_EG: - case AMDGPU::END_LOOP_EG: - case AMDGPU::LOOP_BREAK_EG: - case AMDGPU::CF_CONTINUE_EG: - case AMDGPU::CF_JUMP_EG: - case AMDGPU::CF_ELSE_EG: - case AMDGPU::POP_EG: - case AMDGPU::WHILE_LOOP_R600: - case AMDGPU::END_LOOP_R600: - case AMDGPU::LOOP_BREAK_R600: - case AMDGPU::CF_CONTINUE_R600: - case AMDGPU::CF_JUMP_R600: - case AMDGPU::CF_ELSE_R600: - case AMDGPU::POP_R600: - case AMDGPU::EG_ExportSwz: - case AMDGPU::R600_ExportSwz: - case AMDGPU::EG_ExportBuf: - case AMDGPU::R600_ExportBuf: - case AMDGPU::PAD: - case AMDGPU::CF_END_R600: - case AMDGPU::CF_END_EG: - case AMDGPU::CF_END_CM: { - uint64_t Inst = getBinaryCodeForInstr(MI, Fixups); - EmitByte(INSTR_NATIVE, OS); - Emit(Inst, OS); - break; - } - default: - uint64_t Inst = getBinaryCodeForInstr(MI, Fixups); - EmitByte(INSTR_NATIVE, OS); - Emit(Inst, OS); - break; - } - } -} - -void R600MCCodeEmitter::EmitALUInstr(const MCInst &MI, - SmallVectorImpl<MCFixup> &Fixups, - raw_ostream &OS) const { - const MCInstrDesc &MCDesc = MCII.get(MI.getOpcode()); - - // Emit instruction type - EmitByte(INSTR_ALU, OS); - - uint64_t InstWord01 = getBinaryCodeForInstr(MI, Fixups); - - //older alu have different encoding for instructions with one or two src - //parameters. - if ((STI.getFeatureBits() & AMDGPU::FeatureR600ALUInst) && - !(MCDesc.TSFlags & R600_InstFlag::OP3)) { - uint64_t ISAOpCode = InstWord01 & (0x3FFULL << 39); - InstWord01 &= ~(0x3FFULL << 39); - InstWord01 |= ISAOpCode << 1; - } - - unsigned SrcNum = MCDesc.TSFlags & R600_InstFlag::OP3 ? 3 : - MCDesc.TSFlags & R600_InstFlag::OP2 ? 2 : 1; - - EmitByte(SrcNum, OS); - - const unsigned SrcOps[3][2] = { - {R600Operands::SRC0, R600Operands::SRC0_SEL}, - {R600Operands::SRC1, R600Operands::SRC1_SEL}, - {R600Operands::SRC2, R600Operands::SRC2_SEL} - }; - - for (unsigned SrcIdx = 0; SrcIdx < SrcNum; ++SrcIdx) { - unsigned RegOpIdx = R600Operands::ALUOpTable[SrcNum-1][SrcOps[SrcIdx][0]]; - unsigned SelOpIdx = R600Operands::ALUOpTable[SrcNum-1][SrcOps[SrcIdx][1]]; - EmitSrcISA(MI, RegOpIdx, SelOpIdx, OS); - } - - Emit(InstWord01, OS); - return; -} - -void R600MCCodeEmitter::EmitSrc(const MCInst &MI, unsigned OpIdx, - raw_ostream &OS) const { - const MCOperand &MO = MI.getOperand(OpIdx); - union { - float f; - uint32_t i; - } Value; - Value.i = 0; - // Emit the source select (2 bytes). For GPRs, this is the register index. - // For other potential instruction operands, (e.g. constant registers) the - // value of the source select is defined in the r600isa docs. - if (MO.isReg()) { - unsigned reg = MO.getReg(); - EmitTwoBytes(getHWReg(reg), OS); - if (reg == AMDGPU::ALU_LITERAL_X) { - unsigned ImmOpIndex = MI.getNumOperands() - 1; - MCOperand ImmOp = MI.getOperand(ImmOpIndex); - if (ImmOp.isFPImm()) { - Value.f = ImmOp.getFPImm(); - } else { - assert(ImmOp.isImm()); - Value.i = ImmOp.getImm(); - } - } - } else { - // XXX: Handle other operand types. 
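// [Editor's note, re the IS_TEX path added above] The Word2 field packing,
// restated as a standalone helper; the shift amounts are taken directly
// from EncodeInstruction (ELEMENT_X..ELEMENT_W are 0..3):

#include <cstdint>
static uint32_t packTexWord2(uint32_t Sampler, const uint32_t SrcSel[4],
                             const uint32_t Offsets[3]) {
  return Offsets[0] << 0 | Offsets[1] << 5 | Offsets[2] << 10 |
         Sampler << 15 | SrcSel[0] << 20 | SrcSel[1] << 23 |
         SrcSel[2] << 26 | SrcSel[3] << 29;
}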
- EmitTwoBytes(0, OS); - } - - // Emit the source channel (1 byte) - if (MO.isReg()) { - EmitByte(getHWRegChan(MO.getReg()), OS); + Emit((uint32_t) 0, OS); } else { - EmitByte(0, OS); - } - - // XXX: Emit isNegated (1 byte) - if ((!(isFlagSet(MI, OpIdx, MO_FLAG_ABS))) - && (isFlagSet(MI, OpIdx, MO_FLAG_NEG) || - (MO.isReg() && - (MO.getReg() == AMDGPU::NEG_ONE || MO.getReg() == AMDGPU::NEG_HALF)))){ - EmitByte(1, OS); - } else { - EmitByte(0, OS); - } - - // Emit isAbsolute (1 byte) - if (isFlagSet(MI, OpIdx, MO_FLAG_ABS)) { - EmitByte(1, OS); - } else { - EmitByte(0, OS); - } - - // XXX: Emit relative addressing mode (1 byte) - EmitByte(0, OS); - - // Emit kc_bank, This will be adjusted later by r600_asm - EmitByte(0, OS); - - // Emit the literal value, if applicable (4 bytes). - Emit(Value.i, OS); - -} - -void R600MCCodeEmitter::EmitSrcISA(const MCInst &MI, unsigned RegOpIdx, - unsigned SelOpIdx, raw_ostream &OS) const { - const MCOperand &RegMO = MI.getOperand(RegOpIdx); - const MCOperand &SelMO = MI.getOperand(SelOpIdx); - - union { - float f; - uint32_t i; - } InlineConstant; - InlineConstant.i = 0; - // Emit source type (1 byte) and source select (4 bytes). For GPRs type is 0 - // and select is 0 (GPR index is encoded in the instr encoding. For constants - // type is 1 and select is the original const select passed from the driver. - unsigned Reg = RegMO.getReg(); - if (Reg == AMDGPU::ALU_CONST) { - EmitByte(1, OS); - uint32_t Sel = SelMO.getImm(); - Emit(Sel, OS); - } else { - EmitByte(0, OS); - Emit((uint32_t)0, OS); - } - - if (Reg == AMDGPU::ALU_LITERAL_X) { - unsigned ImmOpIndex = MI.getNumOperands() - 2; - MCOperand ImmOp = MI.getOperand(ImmOpIndex); - if (ImmOp.isFPImm()) { - InlineConstant.f = ImmOp.getFPImm(); - } else { - assert(ImmOp.isImm()); - InlineConstant.i = ImmOp.getImm(); + uint64_t Inst = getBinaryCodeForInstr(MI, Fixups); + if ((STI.getFeatureBits() & AMDGPU::FeatureR600ALUInst) && + ((Desc.TSFlags & R600_InstFlag::OP1) || + Desc.TSFlags & R600_InstFlag::OP2)) { + uint64_t ISAOpCode = Inst & (0x3FFULL << 39); + Inst &= ~(0x3FFULL << 39); + Inst |= ISAOpCode << 1; } - } - - // Emit the literal value, if applicable (4 bytes). 
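// [Editor's note, re the FeatureR600ALUInst branch above] The opcode-field
// relocation applied for the older ALU encoding, as a standalone helper;
// the masks and bit positions mirror the new code exactly:

#include <cstdint>
static uint64_t relocateALUOpcode(uint64_t Inst) {
  uint64_t ISAOpCode = Inst & (0x3FFULL << 39);  // 10-bit opcode at bits 48:39
  Inst &= ~(0x3FFULL << 39);                     // clear the old field
  return Inst | (ISAOpCode << 1);                // re-emit one bit higher
}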
- Emit(InlineConstant.i, OS); -} - -void R600MCCodeEmitter::EmitFCInstr(const MCInst &MI, raw_ostream &OS) const { - - // Emit instruction type - EmitByte(INSTR_FC, OS); - - // Emit SRC - unsigned NumOperands = MI.getNumOperands(); - if (NumOperands > 0) { - assert(NumOperands == 1); - EmitSrc(MI, 0, OS); - } else { - EmitNullBytes(SRC_BYTE_COUNT, OS); - } - - // Emit FC Instruction - enum FCInstr instr; - switch (MI.getOpcode()) { - case AMDGPU::PREDICATED_BREAK: - instr = FC_BREAK_PREDICATE; - break; - case AMDGPU::CONTINUE: - instr = FC_CONTINUE; - break; - case AMDGPU::IF_PREDICATE_SET: - instr = FC_IF_PREDICATE; - break; - case AMDGPU::ELSE: - instr = FC_ELSE; - break; - case AMDGPU::ENDIF: - instr = FC_ENDIF; - break; - case AMDGPU::ENDLOOP: - instr = FC_ENDLOOP; - break; - case AMDGPU::WHILELOOP: - instr = FC_BGNLOOP; - break; - default: - abort(); - break; - } - EmitByte(instr, OS); -} - -void R600MCCodeEmitter::EmitNullBytes(unsigned int ByteCount, - raw_ostream &OS) const { - - for (unsigned int i = 0; i < ByteCount; i++) { - EmitByte(0, OS); + Emit(Inst, OS); } } @@ -502,12 +145,6 @@ void R600MCCodeEmitter::EmitByte(unsigned int Byte, raw_ostream &OS) const { OS.write((uint8_t) Byte & 0xff); } -void R600MCCodeEmitter::EmitTwoBytes(unsigned int Bytes, - raw_ostream &OS) const { - OS.write((uint8_t) (Bytes & 0xff)); - OS.write((uint8_t) ((Bytes >> 8) & 0xff)); -} - void R600MCCodeEmitter::Emit(uint32_t Value, raw_ostream &OS) const { for (unsigned i = 0; i < 4; i++) { OS.write((uint8_t) ((Value >> (8 * i)) & 0xff)); @@ -545,55 +182,4 @@ uint64_t R600MCCodeEmitter::getMachineOpValue(const MCInst &MI, } } -//===----------------------------------------------------------------------===// -// Encoding helper functions -//===----------------------------------------------------------------------===// - -bool R600MCCodeEmitter::isFCOp(unsigned opcode) const { - switch(opcode) { - default: return false; - case AMDGPU::PREDICATED_BREAK: - case AMDGPU::CONTINUE: - case AMDGPU::IF_PREDICATE_SET: - case AMDGPU::ELSE: - case AMDGPU::ENDIF: - case AMDGPU::ENDLOOP: - case AMDGPU::WHILELOOP: - return true; - } -} - -bool R600MCCodeEmitter::isTexOp(unsigned opcode) const { - switch(opcode) { - default: return false; - case AMDGPU::TEX_LD: - case AMDGPU::TEX_GET_TEXTURE_RESINFO: - case AMDGPU::TEX_SAMPLE: - case AMDGPU::TEX_SAMPLE_C: - case AMDGPU::TEX_SAMPLE_L: - case AMDGPU::TEX_SAMPLE_C_L: - case AMDGPU::TEX_SAMPLE_LB: - case AMDGPU::TEX_SAMPLE_C_LB: - case AMDGPU::TEX_SAMPLE_G: - case AMDGPU::TEX_SAMPLE_C_G: - case AMDGPU::TEX_GET_GRADIENTS_H: - case AMDGPU::TEX_GET_GRADIENTS_V: - case AMDGPU::TEX_SET_GRADIENTS_H: - case AMDGPU::TEX_SET_GRADIENTS_V: - return true; - } -} - -bool R600MCCodeEmitter::isFlagSet(const MCInst &MI, unsigned Operand, - unsigned Flag) const { - const MCInstrDesc &MCDesc = MCII.get(MI.getOpcode()); - unsigned FlagIndex = GET_FLAG_OPERAND_IDX(MCDesc.TSFlags); - if (FlagIndex == 0) { - return false; - } - assert(MI.getOperand(FlagIndex).isImm()); - return !!((MI.getOperand(FlagIndex).getImm() >> - (NUM_MO_FLAGS * Operand)) & Flag); -} - #include "AMDGPUGenMCCodeEmitter.inc" diff --git a/lib/Target/R600/Processors.td b/lib/Target/R600/Processors.td index e024e66..81f407e 100644 --- a/lib/Target/R600/Processors.td +++ b/lib/Target/R600/Processors.td @@ -1,4 +1,4 @@ -//===-- Processors.td - TODO: Add brief description -------===// +//===-- Processors.td - R600 Processor definitions ------------------------===// // // The LLVM Compiler Infrastructure // @@ -6,46 +6,45 @@ // 
License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// -// -// AMDIL processors supported. -// -//===----------------------------------------------------------------------===// class Proc<string Name, ProcessorItineraries itin, list<SubtargetFeature> Features> : Processor<Name, itin, Features>; def : Proc<"", R600_VLIW5_Itin, - [FeatureR600ALUInst, FeatureVertexCache]>; + [FeatureR600, FeatureVertexCache]>; def : Proc<"r600", R600_VLIW5_Itin, - [FeatureR600ALUInst , FeatureVertexCache]>; + [FeatureR600 , FeatureVertexCache]>; def : Proc<"rs880", R600_VLIW5_Itin, - [FeatureR600ALUInst]>; + [FeatureR600]>; def : Proc<"rv670", R600_VLIW5_Itin, - [FeatureR600ALUInst, FeatureFP64, FeatureVertexCache]>; + [FeatureR600, FeatureFP64, FeatureVertexCache]>; def : Proc<"rv710", R600_VLIW5_Itin, - [FeatureVertexCache]>; + [FeatureR700, FeatureVertexCache]>; def : Proc<"rv730", R600_VLIW5_Itin, - [FeatureVertexCache]>; + [FeatureR700, FeatureVertexCache]>; def : Proc<"rv770", R600_VLIW5_Itin, - [FeatureFP64, FeatureVertexCache]>; + [FeatureR700, FeatureFP64, FeatureVertexCache]>; def : Proc<"cedar", R600_VLIW5_Itin, - [FeatureByteAddress, FeatureImages, FeatureVertexCache]>; + [FeatureEvergreen, FeatureVertexCache]>; def : Proc<"redwood", R600_VLIW5_Itin, - [FeatureByteAddress, FeatureImages, FeatureVertexCache]>; + [FeatureEvergreen, FeatureVertexCache]>; def : Proc<"sumo", R600_VLIW5_Itin, - [FeatureByteAddress, FeatureImages]>; + [FeatureEvergreen]>; def : Proc<"juniper", R600_VLIW5_Itin, - [FeatureByteAddress, FeatureImages, FeatureVertexCache]>; + [FeatureEvergreen, FeatureVertexCache]>; def : Proc<"cypress", R600_VLIW5_Itin, - [FeatureByteAddress, FeatureImages, FeatureFP64, FeatureVertexCache]>; + [FeatureEvergreen, FeatureFP64, FeatureVertexCache]>; def : Proc<"barts", R600_VLIW5_Itin, - [FeatureByteAddress, FeatureImages, FeatureVertexCache]>; + [FeatureNorthernIslands, FeatureVertexCache]>; def : Proc<"turks", R600_VLIW5_Itin, - [FeatureByteAddress, FeatureImages, FeatureVertexCache]>; + [FeatureNorthernIslands, FeatureVertexCache]>; def : Proc<"caicos", R600_VLIW5_Itin, - [FeatureByteAddress, FeatureImages]>; + [FeatureNorthernIslands]>; def : Proc<"cayman", R600_VLIW4_Itin, - [FeatureByteAddress, FeatureImages, FeatureFP64]>;def : Proc<"SI", SI_Itin, [Feature64BitPtr, FeatureFP64]>; -def : Proc<"tahiti", SI_Itin, [Feature64BitPtr, FeatureFP64]>; -def : Proc<"pitcairn", SI_Itin, [Feature64BitPtr, FeatureFP64]>; -def : Proc<"verde", SI_Itin, [Feature64BitPtr, FeatureFP64]>; -def : Proc<"oland", SI_Itin, [Feature64BitPtr, FeatureFP64]>; + [FeatureNorthernIslands, FeatureFP64, FeatureCaymanISA]>; + +def : Proc<"SI", SI_Itin, [FeatureSouthernIslands]>; +def : Proc<"tahiti", SI_Itin, [FeatureSouthernIslands]>; +def : Proc<"pitcairn", SI_Itin, [FeatureSouthernIslands]>; +def : Proc<"verde", SI_Itin, [FeatureSouthernIslands]>; +def : Proc<"oland", SI_Itin, [FeatureSouthernIslands]>; +def : Proc<"hainan", SI_Itin, [FeatureSouthernIslands]>; diff --git a/lib/Target/R600/R600ControlFlowFinalizer.cpp b/lib/Target/R600/R600ControlFlowFinalizer.cpp index 0995795..ab29d60 100644 --- a/lib/Target/R600/R600ControlFlowFinalizer.cpp +++ b/lib/Target/R600/R600ControlFlowFinalizer.cpp @@ -14,8 +14,6 @@ #define DEBUG_TYPE "r600cf" #include "llvm/Support/Debug.h" -#include "llvm/Support/raw_ostream.h" - #include "AMDGPU.h" #include "R600Defines.h" #include "R600InstrInfo.h" @@ -24,8 +22,11 @@ #include "llvm/CodeGen/MachineFunctionPass.h" #include 
"llvm/CodeGen/MachineInstrBuilder.h" #include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/Support/raw_ostream.h" + +using namespace llvm; -namespace llvm { +namespace { class R600ControlFlowFinalizer : public MachineFunctionPass { @@ -48,7 +49,7 @@ private: static char ID; const R600InstrInfo *TII; - const R600RegisterInfo &TRI; + const R600RegisterInfo *TRI; unsigned MaxFetchInst; const AMDGPUSubtarget &ST; @@ -64,7 +65,7 @@ private: const MCInstrDesc &getHWInstrDesc(ControlFlowInstruction CFI) const { unsigned Opcode = 0; - bool isEg = (ST.device()->getGeneration() >= AMDGPUDeviceInfo::HD5XXX); + bool isEg = (ST.getGeneration() >= AMDGPUSubtarget::EVERGREEN); switch (CFI) { case CF_TC: Opcode = isEg ? AMDGPU::CF_TC_EG : AMDGPU::CF_TC_R600; @@ -97,7 +98,7 @@ private: Opcode = isEg ? AMDGPU::POP_EG : AMDGPU::POP_R600; break; case CF_END: - if (ST.device()->getDeviceFlag() == OCL_DEVICE_CAYMAN) { + if (ST.hasCaymanISA()) { Opcode = AMDGPU::CF_END_CM; break; } @@ -109,28 +110,33 @@ private: } bool isCompatibleWithClause(const MachineInstr *MI, - std::set<unsigned> &DstRegs, std::set<unsigned> &SrcRegs) const { + std::set<unsigned> &DstRegs) const { unsigned DstMI, SrcMI; for (MachineInstr::const_mop_iterator I = MI->operands_begin(), E = MI->operands_end(); I != E; ++I) { const MachineOperand &MO = *I; if (!MO.isReg()) continue; - if (MO.isDef()) - DstMI = MO.getReg(); + if (MO.isDef()) { + unsigned Reg = MO.getReg(); + if (AMDGPU::R600_Reg128RegClass.contains(Reg)) + DstMI = Reg; + else + DstMI = TRI->getMatchingSuperReg(Reg, + TRI->getSubRegFromChannel(TRI->getHWRegChan(Reg)), + &AMDGPU::R600_Reg128RegClass); + } if (MO.isUse()) { unsigned Reg = MO.getReg(); if (AMDGPU::R600_Reg128RegClass.contains(Reg)) SrcMI = Reg; else - SrcMI = TRI.getMatchingSuperReg(Reg, - TRI.getSubRegFromChannel(TRI.getHWRegChan(Reg)), + SrcMI = TRI->getMatchingSuperReg(Reg, + TRI->getSubRegFromChannel(TRI->getHWRegChan(Reg)), &AMDGPU::R600_Reg128RegClass); } } - if ((DstRegs.find(SrcMI) == DstRegs.end()) && - (SrcRegs.find(DstMI) == SrcRegs.end())) { - SrcRegs.insert(SrcMI); + if ((DstRegs.find(SrcMI) == DstRegs.end())) { DstRegs.insert(DstMI); return true; } else @@ -144,16 +150,16 @@ private: std::vector<MachineInstr *> ClauseContent; unsigned AluInstCount = 0; bool IsTex = TII->usesTextureCache(ClauseHead); - std::set<unsigned> DstRegs, SrcRegs; + std::set<unsigned> DstRegs; for (MachineBasicBlock::iterator E = MBB.end(); I != E; ++I) { if (IsTrivialInst(I)) continue; - if (AluInstCount > MaxFetchInst) + if (AluInstCount >= MaxFetchInst) break; if ((IsTex && !TII->usesTextureCache(I)) || (!IsTex && !TII->usesVertexCache(I))) break; - if (!isCompatibleWithClause(I, DstRegs, SrcRegs)) + if (!isCompatibleWithClause(I, DstRegs)) break; AluInstCount ++; ClauseContent.push_back(I); @@ -165,29 +171,27 @@ private: return ClauseFile(MIb, ClauseContent); } - void getLiteral(MachineInstr *MI, std::vector<unsigned> &Lits) const { + void getLiteral(MachineInstr *MI, std::vector<int64_t> &Lits) const { unsigned LiteralRegs[] = { AMDGPU::ALU_LITERAL_X, AMDGPU::ALU_LITERAL_Y, AMDGPU::ALU_LITERAL_Z, AMDGPU::ALU_LITERAL_W }; - for (unsigned i = 0, e = MI->getNumOperands(); i < e; ++i) { - MachineOperand &MO = MI->getOperand(i); - if (!MO.isReg()) + const SmallVector<std::pair<MachineOperand *, int64_t>, 3 > Srcs = + TII->getSrcs(MI); + for (unsigned i = 0, e = Srcs.size(); i < e; ++i) { + if (Srcs[i].first->getReg() != AMDGPU::ALU_LITERAL_X) continue; - if (MO.getReg() != AMDGPU::ALU_LITERAL_X) - continue; - unsigned 
ImmIdx = TII->getOperandIdx(MI->getOpcode(), R600Operands::IMM); - int64_t Imm = MI->getOperand(ImmIdx).getImm(); - std::vector<unsigned>::iterator It = + int64_t Imm = Srcs[i].second; + std::vector<int64_t>::iterator It = std::find(Lits.begin(), Lits.end(), Imm); if (It != Lits.end()) { unsigned Index = It - Lits.begin(); - MO.setReg(LiteralRegs[Index]); + Srcs[i].first->setReg(LiteralRegs[Index]); } else { assert(Lits.size() < 4 && "Too many literals in Instruction Group"); - MO.setReg(LiteralRegs[Lits.size()]); + Srcs[i].first->setReg(LiteralRegs[Lits.size()]); Lits.push_back(Imm); } } @@ -221,7 +225,7 @@ private: } if (!I->isBundle() && !TII->isALUInstr(I->getOpcode())) break; - std::vector<unsigned> Literals; + std::vector<int64_t> Literals; if (I->isBundle()) { MachineInstr *DeleteMI = I; MachineBasicBlock::instr_iterator BI = I.getInstrIterator(); @@ -295,37 +299,38 @@ private: } unsigned getHWStackSize(unsigned StackSubEntry, bool hasPush) const { - switch (ST.device()->getGeneration()) { - case AMDGPUDeviceInfo::HD4XXX: + switch (ST.getGeneration()) { + case AMDGPUSubtarget::R600: + case AMDGPUSubtarget::R700: if (hasPush) StackSubEntry += 2; break; - case AMDGPUDeviceInfo::HD5XXX: + case AMDGPUSubtarget::EVERGREEN: if (hasPush) StackSubEntry ++; - case AMDGPUDeviceInfo::HD6XXX: + case AMDGPUSubtarget::NORTHERN_ISLANDS: StackSubEntry += 2; break; + default: llvm_unreachable("Not a VLIW4/VLIW5 GPU"); } return (StackSubEntry + 3)/4; // Need ceil value of StackSubEntry/4 } public: R600ControlFlowFinalizer(TargetMachine &tm) : MachineFunctionPass(ID), - TII (static_cast<const R600InstrInfo *>(tm.getInstrInfo())), - TRI(TII->getRegisterInfo()), + TII (0), TRI(0), ST(tm.getSubtarget<AMDGPUSubtarget>()) { const AMDGPUSubtarget &ST = tm.getSubtarget<AMDGPUSubtarget>(); - if (ST.device()->getGeneration() <= AMDGPUDeviceInfo::HD4XXX) - MaxFetchInst = 8; - else - MaxFetchInst = 16; + MaxFetchInst = ST.getTexVTXClauseSize(); } virtual bool runOnMachineFunction(MachineFunction &MF) { + TII=static_cast<const R600InstrInfo *>(MF.getTarget().getInstrInfo()); + TRI=static_cast<const R600RegisterInfo *>(MF.getTarget().getRegisterInfo()); + unsigned MaxStack = 0; unsigned CurrentStack = 0; - bool hasPush; + bool HasPush = false; for (MachineFunction::iterator MB = MF.begin(), ME = MF.end(); MB != ME; ++MB) { MachineBasicBlock &MBB = *MB; @@ -337,6 +342,7 @@ public: BuildMI(MBB, MBB.begin(), MBB.findDebugLoc(MBB.begin()), getHWInstrDesc(CF_CALL_FS)); CfCount++; + MaxStack = 1; } std::vector<ClauseFile> FetchClauses, AluClauses; for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); @@ -354,7 +360,7 @@ public: case AMDGPU::CF_ALU_PUSH_BEFORE: CurrentStack++; MaxStack = std::max(MaxStack, CurrentStack); - hasPush = true; + HasPush = true; case AMDGPU::CF_ALU: I = MI; AluClauses.push_back(MakeALUClause(MBB, I)); @@ -475,7 +481,7 @@ public: break; } } - MFI->StackSize = getHWStackSize(MaxStack, hasPush); + MFI->StackSize = getHWStackSize(MaxStack, HasPush); } return false; @@ -488,7 +494,7 @@ public: char R600ControlFlowFinalizer::ID = 0; -} +} // end anonymous namespace llvm::FunctionPass *llvm::createR600ControlFlowFinalizer(TargetMachine &TM) { diff --git a/lib/Target/R600/R600Defines.h b/lib/Target/R600/R600Defines.h index 303ca73..aebe581 100644 --- a/lib/Target/R600/R600Defines.h +++ b/lib/Target/R600/R600Defines.h @@ -54,6 +54,9 @@ namespace R600_InstFlag { #define GET_REG_CHAN(reg) ((reg) >> HW_CHAN_SHIFT) #define GET_REG_INDEX(reg) ((reg) & HW_REG_MASK) +#define IS_VTX(desc) 
((desc).TSFlags & R600_InstFlag::VTX_INST) +#define IS_TEX(desc) ((desc).TSFlags & R600_InstFlag::TEX_INST) + namespace R600Operands { enum Ops { DST, @@ -95,6 +98,106 @@ namespace R600Operands { {0,-1,-1,-1,-1, 1, 2, 3, 4, 5,-1, 6, 7, 8, 9,-1,10,11,12,13,14,15,16,17,18} }; + enum VecOps { + UPDATE_EXEC_MASK_X, + UPDATE_PREDICATE_X, + WRITE_X, + OMOD_X, + DST_REL_X, + CLAMP_X, + SRC0_X, + SRC0_NEG_X, + SRC0_REL_X, + SRC0_ABS_X, + SRC0_SEL_X, + SRC1_X, + SRC1_NEG_X, + SRC1_REL_X, + SRC1_ABS_X, + SRC1_SEL_X, + PRED_SEL_X, + UPDATE_EXEC_MASK_Y, + UPDATE_PREDICATE_Y, + WRITE_Y, + OMOD_Y, + DST_REL_Y, + CLAMP_Y, + SRC0_Y, + SRC0_NEG_Y, + SRC0_REL_Y, + SRC0_ABS_Y, + SRC0_SEL_Y, + SRC1_Y, + SRC1_NEG_Y, + SRC1_REL_Y, + SRC1_ABS_Y, + SRC1_SEL_Y, + PRED_SEL_Y, + UPDATE_EXEC_MASK_Z, + UPDATE_PREDICATE_Z, + WRITE_Z, + OMOD_Z, + DST_REL_Z, + CLAMP_Z, + SRC0_Z, + SRC0_NEG_Z, + SRC0_REL_Z, + SRC0_ABS_Z, + SRC0_SEL_Z, + SRC1_Z, + SRC1_NEG_Z, + SRC1_REL_Z, + SRC1_ABS_Z, + SRC1_SEL_Z, + PRED_SEL_Z, + UPDATE_EXEC_MASK_W, + UPDATE_PREDICATE_W, + WRITE_W, + OMOD_W, + DST_REL_W, + CLAMP_W, + SRC0_W, + SRC0_NEG_W, + SRC0_REL_W, + SRC0_ABS_W, + SRC0_SEL_W, + SRC1_W, + SRC1_NEG_W, + SRC1_REL_W, + SRC1_ABS_W, + SRC1_SEL_W, + PRED_SEL_W, + IMM_0, + IMM_1, + VEC_COUNT + }; + } +//===----------------------------------------------------------------------===// +// Config register definitions +//===----------------------------------------------------------------------===// + +#define R_02880C_DB_SHADER_CONTROL 0x02880C +#define S_02880C_KILL_ENABLE(x) (((x) & 0x1) << 6) + +// These fields are the same for all shader types and families. +#define S_NUM_GPRS(x) (((x) & 0xFF) << 0) +#define S_STACK_SIZE(x) (((x) & 0xFF) << 8) +//===----------------------------------------------------------------------===// +// R600, R700 Registers +//===----------------------------------------------------------------------===// + +#define R_028850_SQ_PGM_RESOURCES_PS 0x028850 +#define R_028868_SQ_PGM_RESOURCES_VS 0x028868 + +//===----------------------------------------------------------------------===// +// Evergreen, Northern Islands Registers +//===----------------------------------------------------------------------===// + +#define R_028844_SQ_PGM_RESOURCES_PS 0x028844 +#define R_028860_SQ_PGM_RESOURCES_VS 0x028860 +#define R_028878_SQ_PGM_RESOURCES_GS 0x028878 +#define R_0288D4_SQ_PGM_RESOURCES_LS 0x0288d4 + #endif // R600DEFINES_H_ diff --git a/lib/Target/R600/R600EmitClauseMarkers.cpp b/lib/Target/R600/R600EmitClauseMarkers.cpp index 3fdc678..ff5ce5a 100644 --- a/lib/Target/R600/R600EmitClauseMarkers.cpp +++ b/lib/Target/R600/R600EmitClauseMarkers.cpp @@ -23,7 +23,9 @@ #include "llvm/CodeGen/MachineInstrBuilder.h" #include "llvm/CodeGen/MachineRegisterInfo.h" -namespace llvm { +using namespace llvm; + +namespace { class R600EmitClauseMarkersPass : public MachineFunctionPass { @@ -36,8 +38,7 @@ private: case AMDGPU::INTERP_PAIR_XY: case AMDGPU::INTERP_PAIR_ZW: case AMDGPU::INTERP_VEC_LOAD: - case AMDGPU::DOT4_eg_pseudo: - case AMDGPU::DOT4_r600_pseudo: + case AMDGPU::DOT_4: return 4; case AMDGPU::KILL: return 0; @@ -71,8 +72,7 @@ private: case AMDGPU::INTERP_PAIR_ZW: case AMDGPU::INTERP_VEC_LOAD: case AMDGPU::COPY: - case AMDGPU::DOT4_eg_pseudo: - case AMDGPU::DOT4_r600_pseudo: + case AMDGPU::DOT_4: return true; default: return false; @@ -89,31 +89,6 @@ private: } } - // Register Idx, then Const value - std::vector<std::pair<unsigned, unsigned> > ExtractConstRead(MachineInstr *MI) - const { - const R600Operands::Ops OpTable[3][2] = { 
- {R600Operands::SRC0, R600Operands::SRC0_SEL}, - {R600Operands::SRC1, R600Operands::SRC1_SEL}, - {R600Operands::SRC2, R600Operands::SRC2_SEL}, - }; - std::vector<std::pair<unsigned, unsigned> > Result; - - if (!TII->isALUInstr(MI->getOpcode())) - return Result; - for (unsigned j = 0; j < 3; j++) { - int SrcIdx = TII->getOperandIdx(MI->getOpcode(), OpTable[j][0]); - if (SrcIdx < 0) - break; - if (MI->getOperand(SrcIdx).getReg() == AMDGPU::ALU_CONST) { - unsigned Const = MI->getOperand( - TII->getOperandIdx(MI->getOpcode(), OpTable[j][1])).getImm(); - Result.push_back(std::pair<unsigned, unsigned>(SrcIdx, Const)); - } - } - return Result; - } - std::pair<unsigned, unsigned> getAccessedBankLine(unsigned Sel) const { // Sel is (512 + (kc_bank << 12) + ConstIndex) << 2 // (See also R600ISelLowering.cpp) @@ -131,9 +106,13 @@ private: bool SubstituteKCacheBank(MachineInstr *MI, std::vector<std::pair<unsigned, unsigned> > &CachedConsts) const { std::vector<std::pair<unsigned, unsigned> > UsedKCache; - std::vector<std::pair<unsigned, unsigned> > Consts = ExtractConstRead(MI); - assert(TII->isALUInstr(MI->getOpcode()) && "Can't assign Const"); + const SmallVector<std::pair<MachineOperand *, int64_t>, 3> &Consts = + TII->getSrcs(MI); + assert((TII->isALUInstr(MI->getOpcode()) || + MI->getOpcode() == AMDGPU::DOT_4) && "Can't assign Const"); for (unsigned i = 0, n = Consts.size(); i < n; ++i) { + if (Consts[i].first->getReg() != AMDGPU::ALU_CONST) + continue; unsigned Sel = Consts[i].second; unsigned Chan = Sel & 3, Index = ((Sel >> 2) - 512) & 31; unsigned KCacheIndex = Index * 4 + Chan; @@ -159,19 +138,22 @@ private: return false; } - for (unsigned i = 0, n = Consts.size(); i < n; ++i) { - switch(UsedKCache[i].first) { + for (unsigned i = 0, j = 0, n = Consts.size(); i < n; ++i) { + if (Consts[i].first->getReg() != AMDGPU::ALU_CONST) + continue; + switch(UsedKCache[j].first) { case 0: - MI->getOperand(Consts[i].first).setReg( - AMDGPU::R600_KC0RegClass.getRegister(UsedKCache[i].second)); + Consts[i].first->setReg( + AMDGPU::R600_KC0RegClass.getRegister(UsedKCache[j].second)); break; case 1: - MI->getOperand(Consts[i].first).setReg( - AMDGPU::R600_KC1RegClass.getRegister(UsedKCache[i].second)); + Consts[i].first->setReg( + AMDGPU::R600_KC1RegClass.getRegister(UsedKCache[j].second)); break; default: llvm_unreachable("Wrong Cache Line"); } + j++; } return true; } @@ -202,6 +184,9 @@ private: if (TII->isALUInstr(I->getOpcode()) && !SubstituteKCacheBank(I, KCacheBanks)) break; + if (I->getOpcode() == AMDGPU::DOT_4 && + !SubstituteKCacheBank(I, KCacheBanks)) + break; AluInstCount += OccupiedDwords(I); } unsigned Opcode = PushBeforeModifier ? 
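The Sel decoding that SubstituteKCacheBank relies on can be checked in isolation. A small self-contained sketch, using the bit layout from the pass's own comment and decode code; the sample bank, index, and channel values are arbitrary:

#include <cassert>

int main() {
  unsigned KcBank = 1, ConstIndex = 7, Chan = 2;
  // Encode: (512 + (kc_bank << 12) + ConstIndex) << 2, channel in the low bits.
  unsigned Sel = ((512 + (KcBank << 12) + ConstIndex) << 2) | Chan;
  // Decode exactly as the pass does.
  assert((Sel & 3) == Chan);
  assert((((Sel >> 2) - 512) & 31) == ConstIndex);
  // KCacheIndex = Index * 4 + Chan selects the dword within the cache line.
  unsigned KCacheIndex = ((((Sel >> 2) - 512) & 31) * 4) + (Sel & 3);
  assert(KCacheIndex == 7 * 4 + 2);
  return 0;
}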
@@ -220,9 +205,11 @@ private: public: R600EmitClauseMarkersPass(TargetMachine &tm) : MachineFunctionPass(ID), - TII (static_cast<const R600InstrInfo *>(tm.getInstrInfo())) { } + TII(0) { } virtual bool runOnMachineFunction(MachineFunction &MF) { + TII = static_cast<const R600InstrInfo *>(MF.getTarget().getInstrInfo()); + for (MachineFunction::iterator BB = MF.begin(), BB_E = MF.end(); BB != BB_E; ++BB) { MachineBasicBlock &MBB = *BB; @@ -246,7 +233,7 @@ public: char R600EmitClauseMarkersPass::ID = 0; -} +} // end anonymous namespace llvm::FunctionPass *llvm::createR600EmitClauseMarkers(TargetMachine &TM) { diff --git a/lib/Target/R600/R600ExpandSpecialInstrs.cpp b/lib/Target/R600/R600ExpandSpecialInstrs.cpp index f8c900f..40c058f 100644 --- a/lib/Target/R600/R600ExpandSpecialInstrs.cpp +++ b/lib/Target/R600/R600ExpandSpecialInstrs.cpp @@ -38,7 +38,7 @@ private: public: R600ExpandSpecialInstrsPass(TargetMachine &tm) : MachineFunctionPass(ID), - TII (static_cast<const R600InstrInfo *>(tm.getInstrInfo())) { } + TII(0) { } virtual bool runOnMachineFunction(MachineFunction &MF); @@ -56,6 +56,7 @@ FunctionPass *llvm::createR600ExpandSpecialInstrsPass(TargetMachine &TM) { } bool R600ExpandSpecialInstrsPass::runOnMachineFunction(MachineFunction &MF) { + TII = static_cast<const R600InstrInfo *>(MF.getTarget().getInstrInfo()); const R600RegisterInfo &TRI = TII->getRegisterInfo(); @@ -182,6 +183,45 @@ bool R600ExpandSpecialInstrsPass::runOnMachineFunction(MachineFunction &MF) { MI.eraseFromParent(); continue; } + case AMDGPU::DOT_4: { + + const R600RegisterInfo &TRI = TII->getRegisterInfo(); + + unsigned DstReg = MI.getOperand(0).getReg(); + unsigned DstBase = TRI.getEncodingValue(DstReg) & HW_REG_MASK; + + for (unsigned Chan = 0; Chan < 4; ++Chan) { + bool Mask = (Chan != TRI.getHWRegChan(DstReg)); + unsigned SubDstReg = + AMDGPU::R600_TReg32RegClass.getRegister((DstBase * 4) + Chan); + MachineInstr *BMI = + TII->buildSlotOfVectorInstruction(MBB, &MI, Chan, SubDstReg); + if (Chan > 0) { + BMI->bundleWithPred(); + } + if (Mask) { + TII->addFlag(BMI, 0, MO_FLAG_MASK); + } + if (Chan != 3) + TII->addFlag(BMI, 0, MO_FLAG_NOT_LAST); + unsigned Opcode = BMI->getOpcode(); + // While not strictly necessary from hw point of view, we force + // all src operands of a dot4 inst to belong to the same slot. 
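The DOT_4 expansion above leans on the packed hardware register encoding from R600Defines.h (GET_REG_INDEX and GET_REG_CHAN). A self-contained sketch of that packing; the shift and mask constants are assumptions for illustration, not copied from the real header:

#include <cassert>

int main() {
  // Assumed layout: channel above HW_CHAN_SHIFT, index in the low bits.
  const unsigned HW_CHAN_SHIFT = 9;   // assumption for illustration
  const unsigned HW_REG_MASK = 0x1ff; // assumption for illustration
  unsigned Index = 5, Chan = 3;
  unsigned Enc = (Chan << HW_CHAN_SHIFT) | Index;
  assert((Enc >> HW_CHAN_SHIFT) == Chan); // GET_REG_CHAN
  assert((Enc & HW_REG_MASK) == Index);   // GET_REG_INDEX
  // The expansion maps (Index, Chan) onto the flat T-register file as
  // Index * 4 + Chan, which is how SubDstReg is chosen above.
  assert(Index * 4 + Chan == 23);
  return 0;
}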
+ unsigned Src0 = BMI->getOperand( + TII->getOperandIdx(Opcode, R600Operands::SRC0)) + .getReg(); + unsigned Src1 = BMI->getOperand( + TII->getOperandIdx(Opcode, R600Operands::SRC1)) + .getReg(); + (void) Src0; + (void) Src1; + if ((TRI.getEncodingValue(Src0) & 0xff) < 127 && + (TRI.getEncodingValue(Src1) & 0xff) < 127) + assert(TRI.getHWRegChan(Src0) == TRI.getHWRegChan(Src1)); + } + MI.eraseFromParent(); + continue; + } } bool IsReduction = TII->isReductionOp(MI.getOpcode()); @@ -268,12 +308,6 @@ bool R600ExpandSpecialInstrsPass::runOnMachineFunction(MachineFunction &MF) { case AMDGPU::CUBE_eg_pseudo: Opcode = AMDGPU::CUBE_eg_real; break; - case AMDGPU::DOT4_r600_pseudo: - Opcode = AMDGPU::DOT4_r600_real; - break; - case AMDGPU::DOT4_eg_pseudo: - Opcode = AMDGPU::DOT4_eg_real; - break; default: break; } diff --git a/lib/Target/R600/R600ISelLowering.cpp b/lib/Target/R600/R600ISelLowering.cpp index a66baca..9cedadb 100644 --- a/lib/Target/R600/R600ISelLowering.cpp +++ b/lib/Target/R600/R600ISelLowering.cpp @@ -26,8 +26,7 @@ using namespace llvm; R600TargetLowering::R600TargetLowering(TargetMachine &TM) : - AMDGPUTargetLowering(TM), - TII(static_cast<const R600InstrInfo*>(TM.getInstrInfo())) { + AMDGPUTargetLowering(TM) { addRegisterClass(MVT::v4f32, &AMDGPU::R600_Reg128RegClass); addRegisterClass(MVT::f32, &AMDGPU::R600_Reg32RegClass); addRegisterClass(MVT::v4i32, &AMDGPU::R600_Reg128RegClass); @@ -43,11 +42,25 @@ R600TargetLowering::R600TargetLowering(TargetMachine &TM) : setOperationAction(ISD::AND, MVT::v4i32, Expand); setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Expand); setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Expand); + setOperationAction(ISD::MUL, MVT::v2i32, Expand); + setOperationAction(ISD::MUL, MVT::v4i32, Expand); + setOperationAction(ISD::OR, MVT::v4i32, Expand); + setOperationAction(ISD::OR, MVT::v2i32, Expand); setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Expand); + setOperationAction(ISD::SHL, MVT::v4i32, Expand); + setOperationAction(ISD::SHL, MVT::v2i32, Expand); + setOperationAction(ISD::SRL, MVT::v4i32, Expand); + setOperationAction(ISD::SRL, MVT::v2i32, Expand); + setOperationAction(ISD::SRA, MVT::v4i32, Expand); + setOperationAction(ISD::SRA, MVT::v2i32, Expand); + setOperationAction(ISD::SUB, MVT::v4i32, Expand); + setOperationAction(ISD::SUB, MVT::v2i32, Expand); setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Expand); setOperationAction(ISD::UDIV, MVT::v4i32, Expand); setOperationAction(ISD::UREM, MVT::v4i32, Expand); setOperationAction(ISD::SETCC, MVT::v4i32, Expand); + setOperationAction(ISD::XOR, MVT::v4i32, Expand); + setOperationAction(ISD::XOR, MVT::v2i32, Expand); setOperationAction(ISD::BR_CC, MVT::i32, Expand); setOperationAction(ISD::BR_CC, MVT::f32, Expand); @@ -58,8 +71,6 @@ R600TargetLowering::R600TargetLowering(TargetMachine &TM) : setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom); setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i1, Custom); - setOperationAction(ISD::ROTL, MVT::i32, Custom); - setOperationAction(ISD::SELECT_CC, MVT::f32, Custom); setOperationAction(ISD::SELECT_CC, MVT::i32, Custom); @@ -70,6 +81,9 @@ R600TargetLowering::R600TargetLowering(TargetMachine &TM) : setOperationAction(ISD::SELECT, MVT::i32, Custom); setOperationAction(ISD::SELECT, MVT::f32, Custom); + setOperationAction(ISD::VSELECT, MVT::v4i32, Expand); + setOperationAction(ISD::VSELECT, MVT::v2i32, Expand); + // Legalize loads and stores to the private address space. 
setOperationAction(ISD::LOAD, MVT::i32, Custom); setOperationAction(ISD::LOAD, MVT::v2i32, Custom); @@ -102,6 +116,8 @@ MachineBasicBlock * R600TargetLowering::EmitInstrWithCustomInserter( MachineFunction * MF = BB->getParent(); MachineRegisterInfo &MRI = MF->getRegInfo(); MachineBasicBlock::iterator I = *MI; + const R600InstrInfo *TII = + static_cast<const R600InstrInfo*>(MF->getTarget().getInstrInfo()); switch (MI->getOpcode()) { default: return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB); @@ -171,23 +187,99 @@ MachineBasicBlock * R600TargetLowering::EmitInstrWithCustomInserter( case AMDGPU::TXD: { unsigned T0 = MRI.createVirtualRegister(&AMDGPU::R600_Reg128RegClass); unsigned T1 = MRI.createVirtualRegister(&AMDGPU::R600_Reg128RegClass); - + MachineOperand &RID = MI->getOperand(4); + MachineOperand &SID = MI->getOperand(5); + unsigned TextureId = MI->getOperand(6).getImm(); + unsigned SrcX = 0, SrcY = 1, SrcZ = 2, SrcW = 3; + unsigned CTX = 1, CTY = 1, CTZ = 1, CTW = 1; + + switch (TextureId) { + case 5: // Rect + CTX = CTY = 0; + break; + case 6: // Shadow1D + SrcW = SrcZ; + break; + case 7: // Shadow2D + SrcW = SrcZ; + break; + case 8: // ShadowRect + CTX = CTY = 0; + SrcW = SrcZ; + break; + case 9: // 1DArray + SrcZ = SrcY; + CTZ = 0; + break; + case 10: // 2DArray + CTZ = 0; + break; + case 11: // Shadow1DArray + SrcZ = SrcY; + CTZ = 0; + break; + case 12: // Shadow2DArray + CTZ = 0; + break; + } BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SET_GRADIENTS_H), T0) .addOperand(MI->getOperand(3)) - .addOperand(MI->getOperand(4)) - .addOperand(MI->getOperand(5)) - .addOperand(MI->getOperand(6)); + .addImm(SrcX) + .addImm(SrcY) + .addImm(SrcZ) + .addImm(SrcW) + .addImm(0) + .addImm(0) + .addImm(0) + .addImm(0) + .addImm(1) + .addImm(2) + .addImm(3) + .addOperand(RID) + .addOperand(SID) + .addImm(CTX) + .addImm(CTY) + .addImm(CTZ) + .addImm(CTW); BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SET_GRADIENTS_V), T1) .addOperand(MI->getOperand(2)) - .addOperand(MI->getOperand(4)) - .addOperand(MI->getOperand(5)) - .addOperand(MI->getOperand(6)); + .addImm(SrcX) + .addImm(SrcY) + .addImm(SrcZ) + .addImm(SrcW) + .addImm(0) + .addImm(0) + .addImm(0) + .addImm(0) + .addImm(1) + .addImm(2) + .addImm(3) + .addOperand(RID) + .addOperand(SID) + .addImm(CTX) + .addImm(CTY) + .addImm(CTZ) + .addImm(CTW); BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SAMPLE_G)) .addOperand(MI->getOperand(0)) .addOperand(MI->getOperand(1)) - .addOperand(MI->getOperand(4)) - .addOperand(MI->getOperand(5)) - .addOperand(MI->getOperand(6)) + .addImm(SrcX) + .addImm(SrcY) + .addImm(SrcZ) + .addImm(SrcW) + .addImm(0) + .addImm(0) + .addImm(0) + .addImm(0) + .addImm(1) + .addImm(2) + .addImm(3) + .addOperand(RID) + .addOperand(SID) + .addImm(CTX) + .addImm(CTY) + .addImm(CTZ) + .addImm(CTW) .addReg(T0, RegState::Implicit) .addReg(T1, RegState::Implicit); break; @@ -196,23 +288,100 @@ MachineBasicBlock * R600TargetLowering::EmitInstrWithCustomInserter( case AMDGPU::TXD_SHADOW: { unsigned T0 = MRI.createVirtualRegister(&AMDGPU::R600_Reg128RegClass); unsigned T1 = MRI.createVirtualRegister(&AMDGPU::R600_Reg128RegClass); + MachineOperand &RID = MI->getOperand(4); + MachineOperand &SID = MI->getOperand(5); + unsigned TextureId = MI->getOperand(6).getImm(); + unsigned SrcX = 0, SrcY = 1, SrcZ = 2, SrcW = 3; + unsigned CTX = 1, CTY = 1, CTZ = 1, CTW = 1; + + switch (TextureId) { + case 5: // Rect + CTX = CTY = 0; + break; + case 6: // Shadow1D + SrcW = SrcZ; + break; + case 7: // 
Shadow2D + SrcW = SrcZ; + break; + case 8: // ShadowRect + CTX = CTY = 0; + SrcW = SrcZ; + break; + case 9: // 1DArray + SrcZ = SrcY; + CTZ = 0; + break; + case 10: // 2DArray + CTZ = 0; + break; + case 11: // Shadow1DArray + SrcZ = SrcY; + CTZ = 0; + break; + case 12: // Shadow2DArray + CTZ = 0; + break; + } BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SET_GRADIENTS_H), T0) .addOperand(MI->getOperand(3)) - .addOperand(MI->getOperand(4)) - .addOperand(MI->getOperand(5)) - .addOperand(MI->getOperand(6)); + .addImm(SrcX) + .addImm(SrcY) + .addImm(SrcZ) + .addImm(SrcW) + .addImm(0) + .addImm(0) + .addImm(0) + .addImm(0) + .addImm(1) + .addImm(2) + .addImm(3) + .addOperand(RID) + .addOperand(SID) + .addImm(CTX) + .addImm(CTY) + .addImm(CTZ) + .addImm(CTW); BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SET_GRADIENTS_V), T1) .addOperand(MI->getOperand(2)) - .addOperand(MI->getOperand(4)) - .addOperand(MI->getOperand(5)) - .addOperand(MI->getOperand(6)); + .addImm(SrcX) + .addImm(SrcY) + .addImm(SrcZ) + .addImm(SrcW) + .addImm(0) + .addImm(0) + .addImm(0) + .addImm(0) + .addImm(1) + .addImm(2) + .addImm(3) + .addOperand(RID) + .addOperand(SID) + .addImm(CTX) + .addImm(CTY) + .addImm(CTZ) + .addImm(CTW); BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SAMPLE_C_G)) .addOperand(MI->getOperand(0)) .addOperand(MI->getOperand(1)) - .addOperand(MI->getOperand(4)) - .addOperand(MI->getOperand(5)) - .addOperand(MI->getOperand(6)) + .addImm(SrcX) + .addImm(SrcY) + .addImm(SrcZ) + .addImm(SrcW) + .addImm(0) + .addImm(0) + .addImm(0) + .addImm(0) + .addImm(1) + .addImm(2) + .addImm(3) + .addOperand(RID) + .addOperand(SID) + .addImm(CTX) + .addImm(CTY) + .addImm(CTZ) + .addImm(CTW) .addReg(T0, RegState::Implicit) .addReg(T1, RegState::Implicit); break; @@ -304,13 +473,9 @@ MachineBasicBlock * R600TargetLowering::EmitInstrWithCustomInserter( // Custom DAG Lowering Operations //===----------------------------------------------------------------------===// -using namespace llvm::Intrinsic; -using namespace llvm::AMDGPUIntrinsic; - SDValue R600TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { switch (Op.getOpcode()) { default: return AMDGPUTargetLowering::LowerOperation(Op, DAG); - case ISD::ROTL: return LowerROTL(Op, DAG); case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); case ISD::SELECT: return LowerSELECT(Op, DAG); case ISD::STORE: return LowerSTORE(Op, DAG); @@ -327,7 +492,7 @@ SDValue R600TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const int64_t RegIndex = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue(); unsigned Reg = AMDGPU::R600_TReg32RegClass.getRegister(RegIndex); MFI->LiveOuts.push_back(Reg); - return DAG.getCopyToReg(Chain, Op.getDebugLoc(), Reg, Op.getOperand(2)); + return DAG.getCopyToReg(Chain, SDLoc(Op), Reg, Op.getOperand(2)); } case AMDGPUIntrinsic::R600_store_swizzle: { const SDValue Args[8] = { @@ -340,7 +505,7 @@ SDValue R600TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const DAG.getConstant(2, MVT::i32), // SWZ_Z DAG.getConstant(3, MVT::i32) // SWZ_W }; - return DAG.getNode(AMDGPUISD::EXPORT, Op.getDebugLoc(), Op.getValueType(), + return DAG.getNode(AMDGPUISD::EXPORT, SDLoc(Op), Op.getValueType(), Args, 8); } @@ -354,13 +519,17 @@ SDValue R600TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); EVT VT = Op.getValueType(); - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); switch(IntrinsicID) { default: 
return AMDGPUTargetLowering::LowerOperation(Op, DAG); case AMDGPUIntrinsic::R600_load_input: { int64_t RegIndex = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); unsigned Reg = AMDGPU::R600_TReg32RegClass.getRegister(RegIndex); - return CreateLiveInRegister(DAG, &AMDGPU::R600_TReg32RegClass, Reg, VT); + MachineFunction &MF = DAG.getMachineFunction(); + MachineRegisterInfo &MRI = MF.getRegInfo(); + MRI.addLiveIn(Reg); + return DAG.getCopyFromReg(DAG.getEntryNode(), + SDLoc(DAG.getEntryNode()), Reg, VT); } case AMDGPUIntrinsic::R600_interp_input: { @@ -368,6 +537,9 @@ SDValue R600TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const int ijb = cast<ConstantSDNode>(Op.getOperand(2))->getSExtValue(); MachineSDNode *interp; if (ijb < 0) { + const MachineFunction &MF = DAG.getMachineFunction(); + const R600InstrInfo *TII = + static_cast<const R600InstrInfo*>(MF.getTarget().getInstrInfo()); interp = DAG.getMachineNode(AMDGPU::INTERP_VEC_LOAD, DL, MVT::v4f32, DAG.getTargetConstant(slot / 4 , MVT::i32)); return DAG.getTargetExtractSubreg( @@ -375,59 +547,153 @@ SDValue R600TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const DL, MVT::f32, SDValue(interp, 0)); } + MachineFunction &MF = DAG.getMachineFunction(); + MachineRegisterInfo &MRI = MF.getRegInfo(); + unsigned RegisterI = AMDGPU::R600_TReg32RegClass.getRegister(2 * ijb); + unsigned RegisterJ = AMDGPU::R600_TReg32RegClass.getRegister(2 * ijb + 1); + MRI.addLiveIn(RegisterI); + MRI.addLiveIn(RegisterJ); + SDValue RegisterINode = DAG.getCopyFromReg(DAG.getEntryNode(), + SDLoc(DAG.getEntryNode()), RegisterI, MVT::f32); + SDValue RegisterJNode = DAG.getCopyFromReg(DAG.getEntryNode(), + SDLoc(DAG.getEntryNode()), RegisterJ, MVT::f32); + if (slot % 4 < 2) interp = DAG.getMachineNode(AMDGPU::INTERP_PAIR_XY, DL, MVT::f32, MVT::f32, DAG.getTargetConstant(slot / 4 , MVT::i32), - CreateLiveInRegister(DAG, &AMDGPU::R600_TReg32RegClass, - AMDGPU::R600_TReg32RegClass.getRegister(2 * ijb + 1), MVT::f32), - CreateLiveInRegister(DAG, &AMDGPU::R600_TReg32RegClass, - AMDGPU::R600_TReg32RegClass.getRegister(2 * ijb), MVT::f32)); + RegisterJNode, RegisterINode); else interp = DAG.getMachineNode(AMDGPU::INTERP_PAIR_ZW, DL, MVT::f32, MVT::f32, DAG.getTargetConstant(slot / 4 , MVT::i32), - CreateLiveInRegister(DAG, &AMDGPU::R600_TReg32RegClass, - AMDGPU::R600_TReg32RegClass.getRegister(2 * ijb + 1), MVT::f32), - CreateLiveInRegister(DAG, &AMDGPU::R600_TReg32RegClass, - AMDGPU::R600_TReg32RegClass.getRegister(2 * ijb), MVT::f32)); - + RegisterJNode, RegisterINode); return SDValue(interp, slot % 2); } + case AMDGPUIntrinsic::R600_tex: + case AMDGPUIntrinsic::R600_texc: + case AMDGPUIntrinsic::R600_txl: + case AMDGPUIntrinsic::R600_txlc: + case AMDGPUIntrinsic::R600_txb: + case AMDGPUIntrinsic::R600_txbc: + case AMDGPUIntrinsic::R600_txf: + case AMDGPUIntrinsic::R600_txq: + case AMDGPUIntrinsic::R600_ddx: + case AMDGPUIntrinsic::R600_ddy: { + unsigned TextureOp; + switch (IntrinsicID) { + case AMDGPUIntrinsic::R600_tex: + TextureOp = 0; + break; + case AMDGPUIntrinsic::R600_texc: + TextureOp = 1; + break; + case AMDGPUIntrinsic::R600_txl: + TextureOp = 2; + break; + case AMDGPUIntrinsic::R600_txlc: + TextureOp = 3; + break; + case AMDGPUIntrinsic::R600_txb: + TextureOp = 4; + break; + case AMDGPUIntrinsic::R600_txbc: + TextureOp = 5; + break; + case AMDGPUIntrinsic::R600_txf: + TextureOp = 6; + break; + case AMDGPUIntrinsic::R600_txq: + TextureOp = 7; + break; + case AMDGPUIntrinsic::R600_ddx: + TextureOp = 8; + break; + case 
AMDGPUIntrinsic::R600_ddy: + TextureOp = 9; + break; + default: + llvm_unreachable("Unknown Texture Operation"); + } - case r600_read_ngroups_x: + SDValue TexArgs[19] = { + DAG.getConstant(TextureOp, MVT::i32), + Op.getOperand(1), + DAG.getConstant(0, MVT::i32), + DAG.getConstant(1, MVT::i32), + DAG.getConstant(2, MVT::i32), + DAG.getConstant(3, MVT::i32), + Op.getOperand(2), + Op.getOperand(3), + Op.getOperand(4), + DAG.getConstant(0, MVT::i32), + DAG.getConstant(1, MVT::i32), + DAG.getConstant(2, MVT::i32), + DAG.getConstant(3, MVT::i32), + Op.getOperand(5), + Op.getOperand(6), + Op.getOperand(7), + Op.getOperand(8), + Op.getOperand(9), + Op.getOperand(10) + }; + return DAG.getNode(AMDGPUISD::TEXTURE_FETCH, DL, MVT::v4f32, TexArgs, 19); + } + case AMDGPUIntrinsic::AMDGPU_dp4: { + SDValue Args[8] = { + DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(1), + DAG.getConstant(0, MVT::i32)), + DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(2), + DAG.getConstant(0, MVT::i32)), + DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(1), + DAG.getConstant(1, MVT::i32)), + DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(2), + DAG.getConstant(1, MVT::i32)), + DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(1), + DAG.getConstant(2, MVT::i32)), + DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(2), + DAG.getConstant(2, MVT::i32)), + DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(1), + DAG.getConstant(3, MVT::i32)), + DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(2), + DAG.getConstant(3, MVT::i32)) + }; + return DAG.getNode(AMDGPUISD::DOT4, DL, MVT::f32, Args, 8); + } + + case Intrinsic::r600_read_ngroups_x: return LowerImplicitParameter(DAG, VT, DL, 0); - case r600_read_ngroups_y: + case Intrinsic::r600_read_ngroups_y: return LowerImplicitParameter(DAG, VT, DL, 1); - case r600_read_ngroups_z: + case Intrinsic::r600_read_ngroups_z: return LowerImplicitParameter(DAG, VT, DL, 2); - case r600_read_global_size_x: + case Intrinsic::r600_read_global_size_x: return LowerImplicitParameter(DAG, VT, DL, 3); - case r600_read_global_size_y: + case Intrinsic::r600_read_global_size_y: return LowerImplicitParameter(DAG, VT, DL, 4); - case r600_read_global_size_z: + case Intrinsic::r600_read_global_size_z: return LowerImplicitParameter(DAG, VT, DL, 5); - case r600_read_local_size_x: + case Intrinsic::r600_read_local_size_x: return LowerImplicitParameter(DAG, VT, DL, 6); - case r600_read_local_size_y: + case Intrinsic::r600_read_local_size_y: return LowerImplicitParameter(DAG, VT, DL, 7); - case r600_read_local_size_z: + case Intrinsic::r600_read_local_size_z: return LowerImplicitParameter(DAG, VT, DL, 8); - case r600_read_tgid_x: + case Intrinsic::r600_read_tgid_x: return CreateLiveInRegister(DAG, &AMDGPU::R600_TReg32RegClass, AMDGPU::T1_X, VT); - case r600_read_tgid_y: + case Intrinsic::r600_read_tgid_y: return CreateLiveInRegister(DAG, &AMDGPU::R600_TReg32RegClass, AMDGPU::T1_Y, VT); - case r600_read_tgid_z: + case Intrinsic::r600_read_tgid_z: return CreateLiveInRegister(DAG, &AMDGPU::R600_TReg32RegClass, AMDGPU::T1_Z, VT); - case r600_read_tidig_x: + case Intrinsic::r600_read_tidig_x: return CreateLiveInRegister(DAG, &AMDGPU::R600_TReg32RegClass, AMDGPU::T0_X, VT); - case r600_read_tidig_y: + case Intrinsic::r600_read_tidig_y: return CreateLiveInRegister(DAG, &AMDGPU::R600_TReg32RegClass, AMDGPU::T0_Y, VT); - case r600_read_tidig_z: + case Intrinsic::r600_read_tidig_z: return
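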
CreateLiveInRegister(DAG, &AMDGPU::R600_TReg32RegClass, AMDGPU::T0_Z, VT); } @@ -464,7 +730,7 @@ void R600TargetLowering::ReplaceNodeResults(SDNode *N, SDValue R600TargetLowering::LowerFPTOUINT(SDValue Op, SelectionDAG &DAG) const { return DAG.getNode( ISD::SETCC, - Op.getDebugLoc(), + SDLoc(Op), MVT::i1, Op, DAG.getConstantFP(0.0f, MVT::f32), DAG.getCondCode(ISD::SETNE) @@ -472,7 +738,7 @@ SDValue R600TargetLowering::LowerFPTOUINT(SDValue Op, SelectionDAG &DAG) const { } SDValue R600TargetLowering::LowerImplicitParameter(SelectionDAG &DAG, EVT VT, - DebugLoc DL, + SDLoc DL, unsigned DwordOffset) const { unsigned ByteOffset = DwordOffset * 4; PointerType * PtrType = PointerType::get(VT.getTypeForEVT(*DAG.getContext()), @@ -501,18 +767,6 @@ SDValue R600TargetLowering::LowerFrameIndex(SDValue Op, SelectionDAG &DAG) const return DAG.getConstant(Offset * 4 * TFL->getStackWidth(MF), MVT::i32); } -SDValue R600TargetLowering::LowerROTL(SDValue Op, SelectionDAG &DAG) const { - DebugLoc DL = Op.getDebugLoc(); - EVT VT = Op.getValueType(); - - return DAG.getNode(AMDGPUISD::BITALIGN, DL, VT, - Op.getOperand(0), - Op.getOperand(0), - DAG.getNode(ISD::SUB, DL, VT, - DAG.getConstant(32, MVT::i32), - Op.getOperand(1))); -} - bool R600TargetLowering::isZero(SDValue Op) const { if(ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Op)) { return Cst->isNullValue(); @@ -524,7 +778,7 @@ bool R600TargetLowering::isZero(SDValue Op) const { } SDValue R600TargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); EVT VT = Op.getValueType(); SDValue LHS = Op.getOperand(0); @@ -645,7 +899,7 @@ SDValue R600TargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const SDValue R600TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { return DAG.getNode(ISD::SELECT_CC, - Op.getDebugLoc(), + SDLoc(Op), Op.getValueType(), Op.getOperand(0), DAG.getConstant(0, MVT::i32), @@ -676,7 +930,7 @@ SDValue R600TargetLowering::stackPtrToRegIndex(SDValue Ptr, default: llvm_unreachable("Invalid stack width"); } - return DAG.getNode(ISD::SRL, Ptr.getDebugLoc(), Ptr.getValueType(), Ptr, + return DAG.getNode(ISD::SRL, SDLoc(Ptr), Ptr.getValueType(), Ptr, DAG.getConstant(SRLPad, MVT::i32)); } @@ -710,7 +964,7 @@ void R600TargetLowering::getStackAddress(unsigned StackWidth, } SDValue R600TargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const { - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); StoreSDNode *StoreNode = cast<StoreSDNode>(Op); SDValue Chain = Op.getOperand(0); SDValue Value = Op.getOperand(1); @@ -772,7 +1026,7 @@ SDValue R600TargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const { Value = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, Value); } Chain = DAG.getNode(AMDGPUISD::REGISTER_STORE, DL, MVT::Other, Chain, Value, Ptr, - DAG.getTargetConstant(0, MVT::i32)); // Channel + DAG.getTargetConstant(0, MVT::i32)); // Channel } return Chain; @@ -822,7 +1076,7 @@ ConstantAddressBlock(unsigned AddressSpace) { SDValue R600TargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const { EVT VT = Op.getValueType(); - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); LoadSDNode *LoadNode = cast<LoadSDNode>(Op); SDValue Chain = Op.getOperand(0); SDValue Ptr = Op.getOperand(1); @@ -851,7 +1105,7 @@ SDValue R600TargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const Result = DAG.getNode(AMDGPUISD::CONST_ADDRESS, DL, MVT::v4i32, DAG.getNode(ISD::SRL, DL, MVT::i32, Ptr, DAG.getConstant(4, MVT::i32)), DAG.getConstant(LoadNode->getAddressSpace() 
- - AMDGPUAS::CONSTANT_BUFFER_0, MVT::i32) + AMDGPUAS::CONSTANT_BUFFER_0, MVT::i32) ); } @@ -924,7 +1178,7 @@ SDValue R600TargetLowering::LowerFormalArguments( CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc DL, SelectionDAG &DAG, + SDLoc DL, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { unsigned ParamOffsetBytes = 36; Function::const_arg_iterator FuncArg = @@ -955,11 +1209,105 @@ SDValue R600TargetLowering::LowerFormalArguments( return Chain; } -EVT R600TargetLowering::getSetCCResultType(EVT VT) const { +EVT R600TargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const { if (!VT.isVector()) return MVT::i32; return VT.changeVectorElementTypeToInteger(); } +static SDValue +CompactSwizzlableVector(SelectionDAG &DAG, SDValue VectorEntry, + DenseMap<unsigned, unsigned> &RemapSwizzle) { + assert(VectorEntry.getOpcode() == ISD::BUILD_VECTOR); + assert(RemapSwizzle.empty()); + SDValue NewBldVec[4] = { + VectorEntry.getOperand(0), + VectorEntry.getOperand(1), + VectorEntry.getOperand(2), + VectorEntry.getOperand(3) + }; + + for (unsigned i = 0; i < 4; i++) { + if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(NewBldVec[i])) { + if (C->isZero()) { + RemapSwizzle[i] = 4; // SEL_0 + NewBldVec[i] = DAG.getUNDEF(MVT::f32); + } else if (C->isExactlyValue(1.0)) { + RemapSwizzle[i] = 5; // SEL_1 + NewBldVec[i] = DAG.getUNDEF(MVT::f32); + } + } + + if (NewBldVec[i].getOpcode() == ISD::UNDEF) + continue; + for (unsigned j = 0; j < i; j++) { + if (NewBldVec[i] == NewBldVec[j]) { + NewBldVec[i] = DAG.getUNDEF(NewBldVec[i].getValueType()); + RemapSwizzle[i] = j; + break; + } + } + } + + return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(VectorEntry), + VectorEntry.getValueType(), NewBldVec, 4); +} + +static SDValue ReorganizeVector(SelectionDAG &DAG, SDValue VectorEntry, + DenseMap<unsigned, unsigned> &RemapSwizzle) { + assert(VectorEntry.getOpcode() == ISD::BUILD_VECTOR); + assert(RemapSwizzle.empty()); + SDValue NewBldVec[4] = { + VectorEntry.getOperand(0), + VectorEntry.getOperand(1), + VectorEntry.getOperand(2), + VectorEntry.getOperand(3) + }; + bool isUnmovable[4] = { false, false, false, false }; + + for (unsigned i = 0; i < 4; i++) { + if (NewBldVec[i].getOpcode() == ISD::EXTRACT_VECTOR_ELT) { + unsigned Idx = dyn_cast<ConstantSDNode>(NewBldVec[i].getOperand(1)) + ->getZExtValue(); + if (!isUnmovable[Idx]) { + // Swap i and Idx + std::swap(NewBldVec[Idx], NewBldVec[i]); + RemapSwizzle[Idx] = i; + RemapSwizzle[i] = Idx; + } + isUnmovable[Idx] = true; + } + } + + return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(VectorEntry), + VectorEntry.getValueType(), NewBldVec, 4); +} + + +SDValue R600TargetLowering::OptimizeSwizzle(SDValue BuildVector, +SDValue Swz[4], SelectionDAG &DAG) const { + assert(BuildVector.getOpcode() == ISD::BUILD_VECTOR); + // Old -> New swizzle values + DenseMap<unsigned, unsigned> SwizzleRemap; + + BuildVector = CompactSwizzlableVector(DAG, BuildVector, SwizzleRemap); + for (unsigned i = 0; i < 4; i++) { + unsigned Idx = dyn_cast<ConstantSDNode>(Swz[i])->getZExtValue(); + if (SwizzleRemap.find(Idx) != SwizzleRemap.end()) + Swz[i] = DAG.getConstant(SwizzleRemap[Idx], MVT::i32); + } + + SwizzleRemap.clear(); + BuildVector = ReorganizeVector(DAG, BuildVector, SwizzleRemap); + for (unsigned i = 0; i < 4; i++) { + unsigned Idx = dyn_cast<ConstantSDNode>(Swz[i])->getZExtValue(); + if (SwizzleRemap.find(Idx) != SwizzleRemap.end()) + Swz[i] = DAG.getConstant(SwizzleRemap[Idx], MVT::i32); + } + + return BuildVector; +} + + 
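OptimizeSwizzle and its two helpers above fold constant and duplicated BUILD_VECTOR lanes into the export swizzle instead of costing register reads. A toy model of the compaction step, with plain doubles standing in for lane operands and 4/5 for the hardware SEL_0/SEL_1 selects as in CompactSwizzlableVector:

#include <cassert>
#include <map>

int main() {
  // Lanes of a BUILD_VECTOR feeding an export: {x, 0.0, 1.0, x}.
  double Lanes[4] = {2.5, 0.0, 1.0, 2.5};
  std::map<unsigned, unsigned> Remap; // old lane -> new swizzle value
  for (unsigned i = 0; i < 4; ++i) {
    if (Lanes[i] == 0.0)
      Remap[i] = 4;      // SEL_0: hardwired zero, no register read
    else if (Lanes[i] == 1.0)
      Remap[i] = 5;      // SEL_1: hardwired one, no register read
    else
      for (unsigned j = 0; j < i; ++j)
        if (Lanes[j] == Lanes[i]) {
          Remap[i] = j;  // duplicate lane: reuse lane j's slot
          break;
        }
  }
  // Only lane 0 still costs a register read; OptimizeSwizzle feeds the
  // remapped selects back into the SWZ_* operands of the export.
  assert(Remap[1] == 4 && Remap[2] == 5 && Remap[3] == 0);
  assert(Remap.find(0) == Remap.end());
  return 0;
}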
//===----------------------------------------------------------------------===// // Custom DAG Optimizations //===----------------------------------------------------------------------===// @@ -973,7 +1321,7 @@ SDValue R600TargetLowering::PerformDAGCombine(SDNode *N, case ISD::FP_ROUND: { SDValue Arg = N->getOperand(0); if (Arg.getOpcode() == ISD::UINT_TO_FP && Arg.getValueType() == MVT::f64) { - return DAG.getNode(ISD::UINT_TO_FP, N->getDebugLoc(), N->getValueType(0), + return DAG.getNode(ISD::UINT_TO_FP, SDLoc(N), N->getValueType(0), Arg.getOperand(0)); } break; @@ -998,7 +1346,7 @@ SDValue R600TargetLowering::PerformDAGCombine(SDNode *N, return SDValue(); } - return DAG.getNode(ISD::SELECT_CC, N->getDebugLoc(), N->getValueType(0), + return DAG.getNode(ISD::SELECT_CC, SDLoc(N), N->getValueType(0), SelectCC.getOperand(0), // LHS SelectCC.getOperand(1), // RHS DAG.getConstant(-1, MVT::i32), // True @@ -1021,7 +1369,7 @@ SDValue R600TargetLowering::PerformDAGCombine(SDNode *N, Arg.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) { if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(N->getOperand(1))) { unsigned Element = Const->getZExtValue(); - return DAG.getNode(ISD::BITCAST, N->getDebugLoc(), N->getVTList(), + return DAG.getNode(ISD::BITCAST, SDLoc(N), N->getVTList(), Arg->getOperand(0).getOperand(Element)); } } @@ -1056,7 +1404,7 @@ SDValue R600TargetLowering::PerformDAGCombine(SDNode *N, ISD::CondCode LHSCC = cast<CondCodeSDNode>(LHS.getOperand(4))->get(); LHSCC = ISD::getSetCCInverse(LHSCC, LHS.getOperand(0).getValueType().isInteger()); - return DAG.getSelectCC(N->getDebugLoc(), + return DAG.getSelectCC(SDLoc(N), LHS.getOperand(0), LHS.getOperand(1), LHS.getOperand(2), @@ -1069,12 +1417,7 @@ SDValue R600TargetLowering::PerformDAGCombine(SDNode *N, SDValue Arg = N->getOperand(1); if (Arg.getOpcode() != ISD::BUILD_VECTOR) break; - SDValue NewBldVec[4] = { - DAG.getUNDEF(MVT::f32), - DAG.getUNDEF(MVT::f32), - DAG.getUNDEF(MVT::f32), - DAG.getUNDEF(MVT::f32) - }; + SDValue NewArgs[8] = { N->getOperand(0), // Chain SDValue(), @@ -1085,23 +1428,40 @@ SDValue R600TargetLowering::PerformDAGCombine(SDNode *N, N->getOperand(6), // SWZ_Z N->getOperand(7) // SWZ_W }; - for (unsigned i = 0; i < Arg.getNumOperands(); i++) { - if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Arg.getOperand(i))) { - if (C->isZero()) { - NewArgs[4 + i] = DAG.getConstant(4, MVT::i32); // SEL_0 - } else if (C->isExactlyValue(1.0)) { - NewArgs[4 + i] = DAG.getConstant(5, MVT::i32); // SEL_0 - } else { - NewBldVec[i] = Arg.getOperand(i); - } - } else { - NewBldVec[i] = Arg.getOperand(i); - } - } - DebugLoc DL = N->getDebugLoc(); - NewArgs[1] = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v4f32, NewBldVec, 4); + SDLoc DL(N); + NewArgs[1] = OptimizeSwizzle(N->getOperand(1), &NewArgs[4], DAG); return DAG.getNode(AMDGPUISD::EXPORT, DL, N->getVTList(), NewArgs, 8); } + case AMDGPUISD::TEXTURE_FETCH: { + SDValue Arg = N->getOperand(1); + if (Arg.getOpcode() != ISD::BUILD_VECTOR) + break; + + SDValue NewArgs[19] = { + N->getOperand(0), + N->getOperand(1), + N->getOperand(2), + N->getOperand(3), + N->getOperand(4), + N->getOperand(5), + N->getOperand(6), + N->getOperand(7), + N->getOperand(8), + N->getOperand(9), + N->getOperand(10), + N->getOperand(11), + N->getOperand(12), + N->getOperand(13), + N->getOperand(14), + N->getOperand(15), + N->getOperand(16), + N->getOperand(17), + N->getOperand(18), + }; + NewArgs[1] = OptimizeSwizzle(N->getOperand(1), &NewArgs[2], DAG); + return DAG.getNode(AMDGPUISD::TEXTURE_FETCH, SDLoc(N), 
N->getVTList(), + NewArgs, 19); + } } return SDValue(); } diff --git a/lib/Target/R600/R600ISelLowering.h b/lib/Target/R600/R600ISelLowering.h index 2c09acb..d4ba4c8 100644 --- a/lib/Target/R600/R600ISelLowering.h +++ b/lib/Target/R600/R600ISelLowering.h @@ -36,21 +36,20 @@ public: CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc DL, SelectionDAG &DAG, + SDLoc DL, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const; - virtual EVT getSetCCResultType(EVT VT) const; + virtual EVT getSetCCResultType(LLVMContext &, EVT VT) const; private: - const R600InstrInfo * TII; - /// Each OpenCL kernel has nine implicit parameters that are stored in the /// first nine dwords of a Vertex Buffer. These implicit parameters are /// lowered to load instructions which retrieve the values from the Vertex /// Buffer. SDValue LowerImplicitParameter(SelectionDAG &DAG, EVT VT, - DebugLoc DL, unsigned DwordOffset) const; + SDLoc DL, unsigned DwordOffset) const; void lowerImplicitParameter(MachineInstr *MI, MachineBasicBlock &BB, MachineRegisterInfo & MRI, unsigned dword_offset) const; + SDValue OptimizeSwizzle(SDValue BuildVector, SDValue Swz[], SelectionDAG &DAG) const; /// \brief Lower ROTL opcode to BITALIGN SDValue LowerROTL(SDValue Op, SelectionDAG &DAG) const; diff --git a/lib/Target/R600/R600InstrInfo.cpp b/lib/Target/R600/R600InstrInfo.cpp index 8fd8385..4f5cfcd 100644 --- a/lib/Target/R600/R600InstrInfo.cpp +++ b/lib/Target/R600/R600InstrInfo.cpp @@ -19,8 +19,8 @@ #include "R600Defines.h" #include "R600MachineFunctionInfo.h" #include "R600RegisterInfo.h" -#include "llvm/CodeGen/MachineInstrBuilder.h" #include "llvm/CodeGen/MachineFrameInfo.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" #include "llvm/CodeGen/MachineRegisterInfo.h" #define GET_INSTRINFO_CTOR @@ -30,7 +30,7 @@ using namespace llvm; R600InstrInfo::R600InstrInfo(AMDGPUTargetMachine &tm) : AMDGPUInstrInfo(tm), - RI(tm, *this), + RI(tm), ST(tm.getSubtarget<AMDGPUSubtarget>()) { } @@ -116,9 +116,6 @@ bool R600InstrInfo::isPlaceHolderOpcode(unsigned Opcode) const { bool R600InstrInfo::isReductionOp(unsigned Opcode) const { switch(Opcode) { default: return false; - case AMDGPU::DOT4_r600_pseudo: - case AMDGPU::DOT4_eg_pseudo: - return true; } } @@ -150,7 +147,7 @@ bool R600InstrInfo::isTransOnly(const MachineInstr *MI) const { } bool R600InstrInfo::usesVertexCache(unsigned Opcode) const { - return ST.hasVertexCache() && get(Opcode).TSFlags & R600_InstFlag::VTX_INST; + return ST.hasVertexCache() && IS_VTX(get(Opcode)); } bool R600InstrInfo::usesVertexCache(const MachineInstr *MI) const { @@ -159,8 +156,7 @@ bool R600InstrInfo::usesVertexCache(const MachineInstr *MI) const { bool R600InstrInfo::usesTextureCache(unsigned Opcode) const { - return (!ST.hasVertexCache() && get(Opcode).TSFlags & R600_InstFlag::VTX_INST) || - (get(Opcode).TSFlags & R600_InstFlag::TEX_INST); + return (!ST.hasVertexCache() && IS_VTX(get(Opcode))) || IS_TEX(get(Opcode)); } bool R600InstrInfo::usesTextureCache(const MachineInstr *MI) const { @@ -169,6 +165,181 @@ bool R600InstrInfo::usesTextureCache(const MachineInstr *MI) const { usesTextureCache(MI->getOpcode()); } +SmallVector<std::pair<MachineOperand *, int64_t>, 3> +R600InstrInfo::getSrcs(MachineInstr *MI) const { + SmallVector<std::pair<MachineOperand *, int64_t>, 3> Result; + + if (MI->getOpcode() == AMDGPU::DOT_4) { + static const R600Operands::VecOps OpTable[8][2] = { + {R600Operands::SRC0_X, R600Operands::SRC0_SEL_X}, + {R600Operands::SRC0_Y,
R600Operands::SRC0_SEL_Y}, + {R600Operands::SRC0_Z, R600Operands::SRC0_SEL_Z}, + {R600Operands::SRC0_W, R600Operands::SRC0_SEL_W}, + {R600Operands::SRC1_X, R600Operands::SRC1_SEL_X}, + {R600Operands::SRC1_Y, R600Operands::SRC1_SEL_Y}, + {R600Operands::SRC1_Z, R600Operands::SRC1_SEL_Z}, + {R600Operands::SRC1_W, R600Operands::SRC1_SEL_W}, + }; + + for (unsigned j = 0; j < 8; j++) { + MachineOperand &MO = MI->getOperand(OpTable[j][0] + 1); + unsigned Reg = MO.getReg(); + if (Reg == AMDGPU::ALU_CONST) { + unsigned Sel = MI->getOperand(OpTable[j][1] + 1).getImm(); + Result.push_back(std::pair<MachineOperand *, int64_t>(&MO, Sel)); + continue; + } + + } + return Result; + } + + static const R600Operands::Ops OpTable[3][2] = { + {R600Operands::SRC0, R600Operands::SRC0_SEL}, + {R600Operands::SRC1, R600Operands::SRC1_SEL}, + {R600Operands::SRC2, R600Operands::SRC2_SEL}, + }; + + for (unsigned j = 0; j < 3; j++) { + int SrcIdx = getOperandIdx(MI->getOpcode(), OpTable[j][0]); + if (SrcIdx < 0) + break; + MachineOperand &MO = MI->getOperand(SrcIdx); + unsigned Reg = MI->getOperand(SrcIdx).getReg(); + if (Reg == AMDGPU::ALU_CONST) { + unsigned Sel = MI->getOperand( + getOperandIdx(MI->getOpcode(), OpTable[j][1])).getImm(); + Result.push_back(std::pair<MachineOperand *, int64_t>(&MO, Sel)); + continue; + } + if (Reg == AMDGPU::ALU_LITERAL_X) { + unsigned Imm = MI->getOperand( + getOperandIdx(MI->getOpcode(), R600Operands::IMM)).getImm(); + Result.push_back(std::pair<MachineOperand *, int64_t>(&MO, Imm)); + continue; + } + Result.push_back(std::pair<MachineOperand *, int64_t>(&MO, 0)); + } + return Result; +} + +std::vector<std::pair<int, unsigned> > +R600InstrInfo::ExtractSrcs(MachineInstr *MI, + const DenseMap<unsigned, unsigned> &PV) + const { + const SmallVector<std::pair<MachineOperand *, int64_t>, 3> Srcs = getSrcs(MI); + const std::pair<int, unsigned> DummyPair(-1, 0); + std::vector<std::pair<int, unsigned> > Result; + unsigned i = 0; + for (unsigned n = Srcs.size(); i < n; ++i) { + unsigned Reg = Srcs[i].first->getReg(); + unsigned Index = RI.getEncodingValue(Reg) & 0xff; + unsigned Chan = RI.getHWRegChan(Reg); + if (Index > 127) { + Result.push_back(DummyPair); + continue; + } + if (PV.find(Index) != PV.end()) { + Result.push_back(DummyPair); + continue; + } + Result.push_back(std::pair<int, unsigned>(Index, Chan)); + } + for (; i < 3; ++i) + Result.push_back(DummyPair); + return Result; +} + +static std::vector<std::pair<int, unsigned> > +Swizzle(std::vector<std::pair<int, unsigned> > Src, + R600InstrInfo::BankSwizzle Swz) { + switch (Swz) { + case R600InstrInfo::ALU_VEC_012: + break; + case R600InstrInfo::ALU_VEC_021: + std::swap(Src[1], Src[2]); + break; + case R600InstrInfo::ALU_VEC_102: + std::swap(Src[0], Src[1]); + break; + case R600InstrInfo::ALU_VEC_120: + std::swap(Src[0], Src[1]); + std::swap(Src[0], Src[2]); + break; + case R600InstrInfo::ALU_VEC_201: + std::swap(Src[0], Src[2]); + std::swap(Src[0], Src[1]); + break; + case R600InstrInfo::ALU_VEC_210: + std::swap(Src[0], Src[2]); + break; + } + return Src; +} + +static bool +isLegal(const std::vector<std::vector<std::pair<int, unsigned> > > &IGSrcs, + const std::vector<R600InstrInfo::BankSwizzle> &Swz, + unsigned CheckedSize) { + int Vector[4][3]; + memset(Vector, -1, sizeof(Vector)); + for (unsigned i = 0; i < CheckedSize; i++) { + const std::vector<std::pair<int, unsigned> > &Srcs = + Swizzle(IGSrcs[i], Swz[i]); + for (unsigned j = 0; j < 3; j++) { + const std::pair<int, unsigned> &Src = Srcs[j]; + if (Src.first < 0) + continue; + 
if (Vector[Src.second][j] < 0) + Vector[Src.second][j] = Src.first; + if (Vector[Src.second][j] != Src.first) + return false; + } + } + return true; +} + +static bool recursiveFitsFPLimitation( +const std::vector<std::vector<std::pair<int, unsigned> > > &IGSrcs, +std::vector<R600InstrInfo::BankSwizzle> &SwzCandidate, +unsigned Depth = 0) { + if (!isLegal(IGSrcs, SwzCandidate, Depth)) + return false; + if (IGSrcs.size() == Depth) + return true; + unsigned i = SwzCandidate[Depth]; + for (; i < 6; i++) { + SwzCandidate[Depth] = (R600InstrInfo::BankSwizzle) i; + if (recursiveFitsFPLimitation(IGSrcs, SwzCandidate, Depth + 1)) + return true; + } + SwzCandidate[Depth] = R600InstrInfo::ALU_VEC_012; + return false; +} + +bool +R600InstrInfo::fitsReadPortLimitations(const std::vector<MachineInstr *> &IG, + const DenseMap<unsigned, unsigned> &PV, + std::vector<BankSwizzle> &ValidSwizzle) + const { + // TODO: support shared src0 - src1 operand + + std::vector<std::vector<std::pair<int, unsigned> > > IGSrcs; + ValidSwizzle.clear(); + for (unsigned i = 0, e = IG.size(); i < e; ++i) { + IGSrcs.push_back(ExtractSrcs(IG[i], PV)); + unsigned Op = getOperandIdx(IG[i]->getOpcode(), + R600Operands::BANK_SWIZZLE); + ValidSwizzle.push_back( (R600InstrInfo::BankSwizzle) + IG[i]->getOperand(Op).getImm()); + } + bool Result = recursiveFitsFPLimitation(IGSrcs, ValidSwizzle); + if (!Result) + return false; + return true; +} + + bool R600InstrInfo::fitsConstReadLimitations(const std::vector<unsigned> &Consts) const { @@ -198,34 +369,22 @@ bool R600InstrInfo::canBundle(const std::vector<MachineInstr *> &MIs) const { std::vector<unsigned> Consts; for (unsigned i = 0, n = MIs.size(); i < n; i++) { - const MachineInstr *MI = MIs[i]; - - const R600Operands::Ops OpTable[3][2] = { - {R600Operands::SRC0, R600Operands::SRC0_SEL}, - {R600Operands::SRC1, R600Operands::SRC1_SEL}, - {R600Operands::SRC2, R600Operands::SRC2_SEL}, - }; - + MachineInstr *MI = MIs[i]; if (!isALUInstr(MI->getOpcode())) continue; - for (unsigned j = 0; j < 3; j++) { - int SrcIdx = getOperandIdx(MI->getOpcode(), OpTable[j][0]); - if (SrcIdx < 0) - break; - unsigned Reg = MI->getOperand(SrcIdx).getReg(); - if (Reg == AMDGPU::ALU_CONST) { - unsigned Const = MI->getOperand( - getOperandIdx(MI->getOpcode(), OpTable[j][1])).getImm(); - Consts.push_back(Const); - continue; - } - if (AMDGPU::R600_KC0RegClass.contains(Reg) || - AMDGPU::R600_KC1RegClass.contains(Reg)) { - unsigned Index = RI.getEncodingValue(Reg) & 0xff; - unsigned Chan = RI.getHWRegChan(Reg); + const SmallVector<std::pair<MachineOperand *, int64_t>, 3> &Srcs = + getSrcs(MI); + + for (unsigned j = 0, e = Srcs.size(); j < e; j++) { + std::pair<MachineOperand *, unsigned> Src = Srcs[j]; + if (Src.first->getReg() == AMDGPU::ALU_CONST) + Consts.push_back(Src.second); + if (AMDGPU::R600_KC0RegClass.contains(Src.first->getReg()) || + AMDGPU::R600_KC1RegClass.contains(Src.first->getReg())) { + unsigned Index = RI.getEncodingValue(Src.first->getReg()) & 0xff; + unsigned Chan = RI.getHWRegChan(Src.first->getReg()); Consts.push_back((Index << 2) | Chan); - continue; } } } @@ -657,7 +816,8 @@ MachineInstrBuilder R600InstrInfo::buildIndirectWrite(MachineBasicBlock *MBB, MachineInstrBuilder Mov = buildDefaultInstruction(*MBB, I, AMDGPU::MOV, AddrReg, ValueReg) - .addReg(AMDGPU::AR_X, RegState::Implicit); + .addReg(AMDGPU::AR_X, + RegState::Implicit | RegState::Kill); setImmOperand(Mov, R600Operands::DST_REL, 1); return Mov; } @@ -674,7 +834,8 @@ MachineInstrBuilder
R600InstrInfo::buildIndirectRead(MachineBasicBlock *MBB, MachineInstrBuilder Mov = buildDefaultInstruction(*MBB, I, AMDGPU::MOV, ValueReg, AddrReg) - .addReg(AMDGPU::AR_X, RegState::Implicit); + .addReg(AMDGPU::AR_X, + RegState::Implicit | RegState::Kill); setImmOperand(Mov, R600Operands::SRC0_REL, 1); return Mov; @@ -729,6 +890,95 @@ MachineInstrBuilder R600InstrInfo::buildDefaultInstruction(MachineBasicBlock &MB return MIB; } +#define OPERAND_CASE(Label) \ + case Label: { \ + static const R600Operands::VecOps Ops[] = \ + { \ + Label##_X, \ + Label##_Y, \ + Label##_Z, \ + Label##_W \ + }; \ + return Ops[Slot]; \ + } + +static R600Operands::VecOps +getSlotedOps(R600Operands::Ops Op, unsigned Slot) { + switch (Op) { + OPERAND_CASE(R600Operands::UPDATE_EXEC_MASK) + OPERAND_CASE(R600Operands::UPDATE_PREDICATE) + OPERAND_CASE(R600Operands::WRITE) + OPERAND_CASE(R600Operands::OMOD) + OPERAND_CASE(R600Operands::DST_REL) + OPERAND_CASE(R600Operands::CLAMP) + OPERAND_CASE(R600Operands::SRC0) + OPERAND_CASE(R600Operands::SRC0_NEG) + OPERAND_CASE(R600Operands::SRC0_REL) + OPERAND_CASE(R600Operands::SRC0_ABS) + OPERAND_CASE(R600Operands::SRC0_SEL) + OPERAND_CASE(R600Operands::SRC1) + OPERAND_CASE(R600Operands::SRC1_NEG) + OPERAND_CASE(R600Operands::SRC1_REL) + OPERAND_CASE(R600Operands::SRC1_ABS) + OPERAND_CASE(R600Operands::SRC1_SEL) + OPERAND_CASE(R600Operands::PRED_SEL) + default: + llvm_unreachable("Wrong Operand"); + } +} + +#undef OPERAND_CASE + +static int +getVecOperandIdx(R600Operands::VecOps Op) { + return 1 + Op; +} + + +MachineInstr *R600InstrInfo::buildSlotOfVectorInstruction( + MachineBasicBlock &MBB, MachineInstr *MI, unsigned Slot, unsigned DstReg) + const { + assert (MI->getOpcode() == AMDGPU::DOT_4 && "Not Implemented"); + unsigned Opcode; + const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>(); + if (ST.getGeneration() <= AMDGPUSubtarget::R700) + Opcode = AMDGPU::DOT4_r600; + else + Opcode = AMDGPU::DOT4_eg; + MachineBasicBlock::iterator I = MI; + MachineOperand &Src0 = MI->getOperand( + getVecOperandIdx(getSlotedOps(R600Operands::SRC0, Slot))); + MachineOperand &Src1 = MI->getOperand( + getVecOperandIdx(getSlotedOps(R600Operands::SRC1, Slot))); + MachineInstr *MIB = buildDefaultInstruction( + MBB, I, Opcode, DstReg, Src0.getReg(), Src1.getReg()); + static const R600Operands::Ops Operands[14] = { + R600Operands::UPDATE_EXEC_MASK, + R600Operands::UPDATE_PREDICATE, + R600Operands::WRITE, + R600Operands::OMOD, + R600Operands::DST_REL, + R600Operands::CLAMP, + R600Operands::SRC0_NEG, + R600Operands::SRC0_REL, + R600Operands::SRC0_ABS, + R600Operands::SRC0_SEL, + R600Operands::SRC1_NEG, + R600Operands::SRC1_REL, + R600Operands::SRC1_ABS, + R600Operands::SRC1_SEL, + }; + + for (unsigned i = 0; i < 14; i++) { + MachineOperand &MO = MI->getOperand( + getVecOperandIdx(getSlotedOps(Operands[i], Slot))); + assert (MO.isImm()); + setImmOperand(MIB, Operands[i], MO.getImm()); + } + MIB->getOperand(20).setImm(0); + return MIB; +} + MachineInstr *R600InstrInfo::buildMovImm(MachineBasicBlock &BB, MachineBasicBlock::iterator I, unsigned DstReg, @@ -744,6 +994,11 @@ int R600InstrInfo::getOperandIdx(const MachineInstr &MI, return getOperandIdx(MI.getOpcode(), Op); } +int R600InstrInfo::getOperandIdx(const MachineInstr &MI, + R600Operands::VecOps Op) const { + return getOperandIdx(MI.getOpcode(), Op); +} + int R600InstrInfo::getOperandIdx(unsigned Opcode, R600Operands::Ops Op) const { unsigned TargetFlags = get(Opcode).TSFlags; @@ -774,6 +1029,11 @@ int R600InstrInfo::getOperandIdx(unsigned 
Opcode, + R600Operands::VecOps Op) const { + return Op + 1; +} + void R600InstrInfo::setImmOperand(MachineInstr *MI, R600Operands::Ops Op, int64_t Imm) const { int Idx = getOperandIdx(*MI, Op); diff --git a/lib/Target/R600/R600InstrInfo.h b/lib/Target/R600/R600InstrInfo.h index babe4b8..6a11c63 100644 --- a/lib/Target/R600/R600InstrInfo.h +++ b/lib/Target/R600/R600InstrInfo.h @@ -16,7 +16,6 @@ #define R600INSTRUCTIONINFO_H_ #include "AMDGPUInstrInfo.h" -#include "AMDIL.h" #include "R600Defines.h" #include "R600RegisterInfo.h" #include <map> @@ -36,8 +35,19 @@ namespace llvm { const AMDGPUSubtarget &ST; int getBranchInstr(const MachineOperand &op) const; + std::vector<std::pair<int, unsigned> > + ExtractSrcs(MachineInstr *MI, const DenseMap<unsigned, unsigned> &PV) const; public: + enum BankSwizzle { + ALU_VEC_012 = 0, + ALU_VEC_021, + ALU_VEC_120, + ALU_VEC_102, + ALU_VEC_201, + ALU_VEC_210 + }; + explicit R600InstrInfo(AMDGPUTargetMachine &tm); const R600RegisterInfo &getRegisterInfo() const; @@ -62,6 +72,23 @@ namespace llvm { bool usesTextureCache(unsigned Opcode) const; bool usesTextureCache(const MachineInstr *MI) const; + /// \returns a pair for each src of an ALU instruction. + /// The first member of a pair is the register id. + /// If the register is ALU_CONST, the second member is the SEL value. + /// If the register is ALU_LITERAL, the second member is the IMM value. + /// Otherwise, the second member is undefined. + SmallVector<std::pair<MachineOperand *, int64_t>, 3> + getSrcs(MachineInstr *MI) const; + + /// Given the order VEC_012 < VEC_021 < VEC_120 < VEC_102 < VEC_201 < VEC_210, + /// returns true and stores in BS the first (in lexical order) BankSwizzle + /// assignment, starting from the one already provided in the Instruction + /// Group MIs, that fits the Read Port limitations, if one exists. Otherwise + /// returns false and leaves BS with undefined content. + /// PV maps GPRs to the PV registers of the Instruction Group MIs. + bool fitsReadPortLimitations(const std::vector<MachineInstr *> &MIs, + const DenseMap<unsigned, unsigned> &PV, + std::vector<BankSwizzle> &BS) const; bool fitsConstReadLimitations(const std::vector<unsigned>&) const; bool canBundle(const std::vector<MachineInstr *> &) const; @@ -170,6 +197,11 @@ namespace llvm { unsigned Src0Reg, unsigned Src1Reg = 0) const; + MachineInstr *buildSlotOfVectorInstruction(MachineBasicBlock &MBB, + MachineInstr *MI, + unsigned Slot, + unsigned DstReg) const; + MachineInstr *buildMovImm(MachineBasicBlock &BB, MachineBasicBlock::iterator I, unsigned DstReg, @@ -179,11 +211,13 @@ namespace llvm { /// /// \returns -1 if the Instruction does not contain the specified \p Op. int getOperandIdx(const MachineInstr &MI, R600Operands::Ops Op) const; + int getOperandIdx(const MachineInstr &MI, R600Operands::VecOps Op) const; /// \brief Get the index of \p Op for the given Opcode. /// /// \returns -1 if the Instruction does not contain the specified \p Op. int getOperandIdx(unsigned Opcode, R600Operands::Ops Op) const; + int getOperandIdx(unsigned Opcode, R600Operands::VecOps Op) const; /// \brief Helper function for setting instruction flag values.
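fitsReadPortLimitations, declared here and implemented above, is in essence a depth-first search over the six slot permutations per instruction. A compact standalone model of that search on toy (register index, channel) pairs; the permutation table order and the sample data are illustrative and do not match the BankSwizzle enum exactly:

#include <cstring>
#include <utility>
#include <vector>

typedef std::pair<int, unsigned> Src; // (register index, channel); -1 = free

// The six ways the three read ports can be permuted.
static const int Perms[6][3] =
  {{0,1,2},{0,2,1},{1,0,2},{1,2,0},{2,0,1},{2,1,0}};

// A full assignment is legal if no (channel, port) cell is asked to carry
// two different register indices, mirroring isLegal above.
static bool LegalAssignment(const std::vector<std::vector<Src> > &IG,
                            const std::vector<int> &Swz) {
  int Cell[4][3];
  std::memset(Cell, -1, sizeof(Cell));
  for (unsigned i = 0; i < IG.size(); ++i) {
    for (int j = 0; j < 3; ++j) {
      const Src &S = IG[i][Perms[Swz[i]][j]];
      if (S.first < 0)
        continue;
      if (Cell[S.second][j] < 0)
        Cell[S.second][j] = S.first;
      else if (Cell[S.second][j] != S.first)
        return false;
    }
  }
  return true;
}

// Depth-first search in lexical order, as recursiveFitsFPLimitation does.
static bool Search(const std::vector<std::vector<Src> > &IG,
                   std::vector<int> &Swz, unsigned Depth) {
  if (Depth == IG.size())
    return LegalAssignment(IG, Swz);
  for (int p = 0; p < 6; ++p) {
    Swz[Depth] = p;
    if (Search(IG, Swz, Depth + 1))
      return true;
  }
  return false;
}

int main() {
  // Two instructions whose first sources collide on (channel 0, port 0)
  // unless one of them is swizzled to a different port.
  std::vector<std::vector<Src> > IG(2, std::vector<Src>(3, Src(-1, 0)));
  IG[0][0] = Src(1, 0);
  IG[0][1] = Src(2, 1);
  IG[1][0] = Src(3, 0);
  std::vector<int> Swz(2, 0);
  return Search(IG, Swz, 0) ? 0 : 1; // succeeds with instruction 1 permuted
}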
void setImmOperand(MachineInstr *MI, R600Operands::Ops Op, int64_t Imm) const; diff --git a/lib/Target/R600/R600Instructions.td b/lib/Target/R600/R600Instructions.td index 1060b0a..b4131be 100644 --- a/lib/Target/R600/R600Instructions.td +++ b/lib/Target/R600/R600Instructions.td @@ -78,7 +78,7 @@ def SEL : OperandWithDefaultOps <i32, (ops (i32 -1))> { let PrintMethod = "printSel"; } def BANK_SWIZZLE : OperandWithDefaultOps <i32, (ops (i32 0))> { - let PrintMethod = "printSel"; + let PrintMethod = "printBankSwizzle"; } def LITERAL : InstFlag<"printLiteral">; @@ -96,6 +96,12 @@ def UP : InstFlag <"printUpdatePred">; // Once we start using the packetizer in this backend we should have this // default to 0. def LAST : InstFlag<"printLast", 1>; +def RSel : Operand<i32> { + let PrintMethod = "printRSel"; +} +def CT: Operand<i32> { + let PrintMethod = "printCT"; +} def FRAMEri : Operand<iPTR> { let MIOperandInfo = (ops R600_Reg32:$ptr, i32imm:$index); @@ -358,9 +364,9 @@ class R600_1OP <bits<11> inst, string opName, list<dag> pattern, LAST:$last, R600_Pred:$pred_sel, LITERAL:$literal, BANK_SWIZZLE:$bank_swizzle), !strconcat(" ", opName, - "$clamp $dst$write$dst_rel$omod, " + "$clamp $last $dst$write$dst_rel$omod, " "$src0_neg$src0_abs$src0$src0_abs$src0_rel, " - "$literal $pred_sel$last"), + "$pred_sel $bank_swizzle"), pattern, itin>, R600ALU_Word0, @@ -399,10 +405,10 @@ class R600_2OP <bits<11> inst, string opName, list<dag> pattern, LAST:$last, R600_Pred:$pred_sel, LITERAL:$literal, BANK_SWIZZLE:$bank_swizzle), !strconcat(" ", opName, - "$clamp $update_exec_mask$update_pred$dst$write$dst_rel$omod, " + "$clamp $last $update_exec_mask$update_pred$dst$write$dst_rel$omod, " "$src0_neg$src0_abs$src0$src0_abs$src0_rel, " "$src1_neg$src1_abs$src1$src1_abs$src1_rel, " - "$literal $pred_sel$last"), + "$pred_sel $bank_swizzle"), pattern, itin>, R600ALU_Word0, @@ -436,11 +442,12 @@ class R600_3OP <bits<5> inst, string opName, list<dag> pattern, R600_Reg32:$src2, NEG:$src2_neg, REL:$src2_rel, SEL:$src2_sel, LAST:$last, R600_Pred:$pred_sel, LITERAL:$literal, BANK_SWIZZLE:$bank_swizzle), - !strconcat(" ", opName, "$clamp $dst$dst_rel, " + !strconcat(" ", opName, "$clamp $last $dst$dst_rel, " "$src0_neg$src0$src0_rel, " "$src1_neg$src1$src1_rel, " "$src2_neg$src2$src2_rel, " - "$literal $pred_sel$last"), + "$pred_sel" + "$bank_swizzle"), pattern, itin>, R600ALU_Word0, @@ -462,38 +469,7 @@ class R600_REDUCTION <bits<11> inst, dag ins, string asm, list<dag> pattern, pattern, itin>; -class R600_TEX <bits<11> inst, string opName, list<dag> pattern, - InstrItinClass itin = AnyALU> : - InstR600 <(outs R600_Reg128:$DST_GPR), - (ins R600_Reg128:$SRC_GPR, i32imm:$RESOURCE_ID, i32imm:$SAMPLER_ID, i32imm:$textureTarget), - !strconcat(opName, "$DST_GPR, $SRC_GPR, $RESOURCE_ID, $SAMPLER_ID, $textureTarget"), - pattern, - itin>, TEX_WORD0, TEX_WORD1, TEX_WORD2 { - let Inst{31-0} = Word0; - let Inst{63-32} = Word1; - - let TEX_INST = inst{4-0}; - let SRC_REL = 0; - let DST_REL = 0; - let DST_SEL_X = 0; - let DST_SEL_Y = 1; - let DST_SEL_Z = 2; - let DST_SEL_W = 3; - let LOD_BIAS = 0; - - let INST_MOD = 0; - let FETCH_WHOLE_QUAD = 0; - let ALT_CONST = 0; - let SAMPLER_INDEX_MODE = 0; - let RESOURCE_INDEX_MODE = 0; - - let COORD_TYPE_X = 0; - let COORD_TYPE_Y = 0; - let COORD_TYPE_Z = 0; - let COORD_TYPE_W = 0; - - let TEXInst = 1; - } + } // End mayLoad = 1, mayStore = 0, hasSideEffects = 0 @@ -575,26 +551,21 @@ def load_param : LoadParamFrag<load>; def load_param_zexti8 : LoadParamFrag<zextloadi8>; def load_param_zexti16 : 
LoadParamFrag<zextloadi16>; -def isR600 : Predicate<"Subtarget.device()" - "->getGeneration() == AMDGPUDeviceInfo::HD4XXX">; -def isR700 : Predicate<"Subtarget.device()" - "->getGeneration() == AMDGPUDeviceInfo::HD4XXX &&" - "Subtarget.device()->getDeviceFlag()" - ">= OCL_DEVICE_RV710">; +def isR600 : Predicate<"Subtarget.getGeneration() <= AMDGPUSubtarget::R700">; +def isR700 : Predicate<"Subtarget.getGeneration() == AMDGPUSubtarget::R700">; def isEG : Predicate< - "Subtarget.device()->getGeneration() >= AMDGPUDeviceInfo::HD5XXX && " - "Subtarget.device()->getGeneration() < AMDGPUDeviceInfo::HD7XXX && " - "Subtarget.device()->getDeviceFlag() != OCL_DEVICE_CAYMAN">; + "Subtarget.getGeneration() >= AMDGPUSubtarget::EVERGREEN && " + "Subtarget.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS && " + "!Subtarget.hasCaymanISA()">; -def isCayman : Predicate<"Subtarget.device()" - "->getDeviceFlag() == OCL_DEVICE_CAYMAN">; -def isEGorCayman : Predicate<"Subtarget.device()" - "->getGeneration() == AMDGPUDeviceInfo::HD5XXX" - "|| Subtarget.device()->getGeneration() ==" - "AMDGPUDeviceInfo::HD6XXX">; +def isCayman : Predicate<"Subtarget.hasCaymanISA()">; +def isEGorCayman : Predicate<"Subtarget.getGeneration() == " + "AMDGPUSubtarget::EVERGREEN" + "|| Subtarget.getGeneration() ==" + "AMDGPUSubtarget::NORTHERN_ISLANDS">; def isR600toCayman : Predicate< - "Subtarget.device()->getGeneration() <= AMDGPUDeviceInfo::HD6XXX">; + "Subtarget.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS">; //===----------------------------------------------------------------------===// // R600 SDNodes @@ -602,13 +573,13 @@ def isR600toCayman : Predicate< def INTERP_PAIR_XY : AMDGPUShaderInst < (outs R600_TReg32_X:$dst0, R600_TReg32_Y:$dst1), - (ins i32imm:$src0, R600_Reg32:$src1, R600_Reg32:$src2), + (ins i32imm:$src0, R600_TReg32_Y:$src1, R600_TReg32_X:$src2), "INTERP_PAIR_XY $src0 $src1 $src2 : $dst0 dst1", []>; def INTERP_PAIR_ZW : AMDGPUShaderInst < (outs R600_TReg32_Z:$dst0, R600_TReg32_W:$dst1), - (ins i32imm:$src0, R600_Reg32:$src1, R600_Reg32:$src2), + (ins i32imm:$src0, R600_TReg32_Y:$src1, R600_TReg32_X:$src2), "INTERP_PAIR_ZW $src0 $src1 $src2 : $dst0 dst1", []>; @@ -617,6 +588,36 @@ def CONST_ADDRESS: SDNode<"AMDGPUISD::CONST_ADDRESS", [SDNPVariadic] >; +def DOT4 : SDNode<"AMDGPUISD::DOT4", + SDTypeProfile<1, 8, [SDTCisFP<0>, SDTCisVT<1, f32>, SDTCisVT<2, f32>, + SDTCisVT<3, f32>, SDTCisVT<4, f32>, SDTCisVT<5, f32>, + SDTCisVT<6, f32>, SDTCisVT<7, f32>, SDTCisVT<8, f32>]>, + [] +>; + +def TEXTURE_FETCH_Type : SDTypeProfile<1, 19, [SDTCisFP<0>]>; + +def TEXTURE_FETCH: SDNode<"AMDGPUISD::TEXTURE_FETCH", TEXTURE_FETCH_Type, []>; + +multiclass TexPattern<bits<32> TextureOp, Instruction inst, ValueType vt = v4f32> { +def : Pat<(TEXTURE_FETCH (i32 TextureOp), vt:$SRC_GPR, + (i32 imm:$srcx), (i32 imm:$srcy), (i32 imm:$srcz), (i32 imm:$srcw), + (i32 imm:$offsetx), (i32 imm:$offsety), (i32 imm:$offsetz), + (i32 imm:$DST_SEL_X), (i32 imm:$DST_SEL_Y), (i32 imm:$DST_SEL_Z), + (i32 imm:$DST_SEL_W), + (i32 imm:$RESOURCE_ID), (i32 imm:$SAMPLER_ID), + (i32 imm:$COORD_TYPE_X), (i32 imm:$COORD_TYPE_Y), (i32 imm:$COORD_TYPE_Z), + (i32 imm:$COORD_TYPE_W)), + (inst R600_Reg128:$SRC_GPR, + imm:$srcx, imm:$srcy, imm:$srcz, imm:$srcw, + imm:$offsetx, imm:$offsety, imm:$offsetz, + imm:$DST_SEL_X, imm:$DST_SEL_Y, imm:$DST_SEL_Z, + imm:$DST_SEL_W, + imm:$RESOURCE_ID, imm:$SAMPLER_ID, + imm:$COORD_TYPE_X, imm:$COORD_TYPE_Y, imm:$COORD_TYPE_Z, + imm:$COORD_TYPE_W)>; +} + 
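
The predicate rewrite above works because the generation enumerators are ordered from oldest to newest (R600, R700, EVERGREEN, NORTHERN_ISLANDS, SOUTHERN_ISLANDS), so a single range comparison can replace the old device()/device-flag tests. A minimal standalone C++ sketch of that design, using an invented enum in place of AMDGPUSubtarget::Generation (illustration only, not part of this patch):

#include <cassert>

// Invented stand-in for AMDGPUSubtarget::Generation; only the relative
// order of the values matters.
enum Generation { R600, R700, EVERGREEN, NORTHERN_ISLANDS, SOUTHERN_ISLANDS };

// Mirrors the isEGorCayman predicate string above.
static bool isEGorCayman(Generation Gen) {
  return Gen == EVERGREEN || Gen == NORTHERN_ISLANDS;
}

// Mirrors isR600toCayman: one ordered comparison instead of several
// device-flag checks.
static bool isR600toCayman(Generation Gen) {
  return Gen <= NORTHERN_ISLANDS;
}

int main() {
  assert(isR600toCayman(R700) && !isEGorCayman(R700));
  assert(isEGorCayman(NORTHERN_ISLANDS) && isR600toCayman(NORTHERN_ISLANDS));
  assert(!isR600toCayman(SOUTHERN_ISLANDS));
  return 0;
}
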
//===----------------------------------------------------------------------===// // Interpolation Instructions //===----------------------------------------------------------------------===// @@ -814,12 +815,15 @@ class CF_ALU_WORD1 { let Word1{31} = BARRIER; } +def KCACHE : InstFlag<"printKCache">; + class ALU_CLAUSE<bits<4> inst, string OpName> : AMDGPUInst <(outs), -(ins i32imm:$ADDR, i32imm:$KCACHE_BANK0, i32imm:$KCACHE_BANK1, i32imm:$KCACHE_MODE0, i32imm:$KCACHE_MODE1, -i32imm:$KCACHE_ADDR0, i32imm:$KCACHE_ADDR1, i32imm:$COUNT), +(ins i32imm:$ADDR, i32imm:$KCACHE_BANK0, i32imm:$KCACHE_BANK1, +KCACHE:$KCACHE_MODE0, KCACHE:$KCACHE_MODE1, +i32imm:$KCACHE_ADDR0, i32imm:$KCACHE_ADDR1, +i32imm:$COUNT), !strconcat(OpName, " $COUNT, @$ADDR, " -"KC0[CB$KCACHE_BANK0:$KCACHE_ADDR0-$KCACHE_ADDR0+32]" -", KC1[CB$KCACHE_BANK1:$KCACHE_ADDR1-$KCACHE_ADDR1+32]"), +"KC0[$KCACHE_MODE0], KC1[$KCACHE_MODE1]"), [] >, CF_ALU_WORD0, CF_ALU_WORD1 { field bits<64> Inst; @@ -1128,92 +1132,70 @@ def CNDGT_INT : R600_3OP < // Texture instructions //===----------------------------------------------------------------------===// -def TEX_LD : R600_TEX < - 0x03, "TEX_LD", - [(set v4f32:$DST_GPR, (int_AMDGPU_txf v4f32:$SRC_GPR, - imm:$OFFSET_X, imm:$OFFSET_Y, imm:$OFFSET_Z, imm:$RESOURCE_ID, - imm:$SAMPLER_ID, imm:$textureTarget))] -> { -let AsmString = "TEX_LD $DST_GPR, $SRC_GPR, $OFFSET_X, $OFFSET_Y, $OFFSET_Z," - "$RESOURCE_ID, $SAMPLER_ID, $textureTarget"; -let InOperandList = (ins R600_Reg128:$SRC_GPR, i32imm:$OFFSET_X, - i32imm:$OFFSET_Y, i32imm:$OFFSET_Z, i32imm:$RESOURCE_ID, i32imm:$SAMPLER_ID, - i32imm:$textureTarget); -} - -def TEX_GET_TEXTURE_RESINFO : R600_TEX < - 0x04, "TEX_GET_TEXTURE_RESINFO", - [(set v4f32:$DST_GPR, (int_AMDGPU_txq v4f32:$SRC_GPR, - imm:$RESOURCE_ID, imm:$SAMPLER_ID, imm:$textureTarget))] ->; - -def TEX_GET_GRADIENTS_H : R600_TEX < - 0x07, "TEX_GET_GRADIENTS_H", - [(set v4f32:$DST_GPR, (int_AMDGPU_ddx v4f32:$SRC_GPR, - imm:$RESOURCE_ID, imm:$SAMPLER_ID, imm:$textureTarget))] ->; - -def TEX_GET_GRADIENTS_V : R600_TEX < - 0x08, "TEX_GET_GRADIENTS_V", - [(set v4f32:$DST_GPR, (int_AMDGPU_ddy v4f32:$SRC_GPR, - imm:$RESOURCE_ID, imm:$SAMPLER_ID, imm:$textureTarget))] ->; - -def TEX_SET_GRADIENTS_H : R600_TEX < - 0x0B, "TEX_SET_GRADIENTS_H", - [] ->; - -def TEX_SET_GRADIENTS_V : R600_TEX < - 0x0C, "TEX_SET_GRADIENTS_V", - [] ->; +let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in { -def TEX_SAMPLE : R600_TEX < - 0x10, "TEX_SAMPLE", - [(set v4f32:$DST_GPR, (int_AMDGPU_tex v4f32:$SRC_GPR, - imm:$RESOURCE_ID, imm:$SAMPLER_ID, imm:$textureTarget))] ->; +class R600_TEX <bits<11> inst, string opName> : + InstR600 <(outs R600_Reg128:$DST_GPR), + (ins R600_Reg128:$SRC_GPR, + RSel:$srcx, RSel:$srcy, RSel:$srcz, RSel:$srcw, + i32imm:$offsetx, i32imm:$offsety, i32imm:$offsetz, + RSel:$DST_SEL_X, RSel:$DST_SEL_Y, RSel:$DST_SEL_Z, RSel:$DST_SEL_W, + i32imm:$RESOURCE_ID, i32imm:$SAMPLER_ID, + CT:$COORD_TYPE_X, CT:$COORD_TYPE_Y, CT:$COORD_TYPE_Z, + CT:$COORD_TYPE_W), + !strconcat(opName, + " $DST_GPR.$DST_SEL_X$DST_SEL_Y$DST_SEL_Z$DST_SEL_W, " + "$SRC_GPR.$srcx$srcy$srcz$srcw " + "RID:$RESOURCE_ID SID:$SAMPLER_ID " + "CT:$COORD_TYPE_X$COORD_TYPE_Y$COORD_TYPE_Z$COORD_TYPE_W"), + [], + NullALU>, TEX_WORD0, TEX_WORD1, TEX_WORD2 { + let Inst{31-0} = Word0; + let Inst{63-32} = Word1; -def TEX_SAMPLE_C : R600_TEX < - 0x18, "TEX_SAMPLE_C", - [(set v4f32:$DST_GPR, (int_AMDGPU_tex v4f32:$SRC_GPR, - imm:$RESOURCE_ID, imm:$SAMPLER_ID, TEX_SHADOW:$textureTarget))] ->; + let TEX_INST = inst{4-0}; + let SRC_REL = 0; + 
let DST_REL = 0; + let LOD_BIAS = 0; -def TEX_SAMPLE_L : R600_TEX < - 0x11, "TEX_SAMPLE_L", - [(set v4f32:$DST_GPR, (int_AMDGPU_txl v4f32:$SRC_GPR, - imm:$RESOURCE_ID, imm:$SAMPLER_ID, imm:$textureTarget))] ->; + let INST_MOD = 0; + let FETCH_WHOLE_QUAD = 0; + let ALT_CONST = 0; + let SAMPLER_INDEX_MODE = 0; + let RESOURCE_INDEX_MODE = 0; -def TEX_SAMPLE_C_L : R600_TEX < - 0x19, "TEX_SAMPLE_C_L", - [(set v4f32:$DST_GPR, (int_AMDGPU_txl v4f32:$SRC_GPR, - imm:$RESOURCE_ID, imm:$SAMPLER_ID, TEX_SHADOW:$textureTarget))] ->; + let TEXInst = 1; +} -def TEX_SAMPLE_LB : R600_TEX < - 0x12, "TEX_SAMPLE_LB", - [(set v4f32:$DST_GPR, (int_AMDGPU_txb v4f32:$SRC_GPR, - imm:$RESOURCE_ID, imm:$SAMPLER_ID, imm:$textureTarget))] ->; +} // End mayLoad = 0, mayStore = 0, hasSideEffects = 0 -def TEX_SAMPLE_C_LB : R600_TEX < - 0x1A, "TEX_SAMPLE_C_LB", - [(set v4f32:$DST_GPR, (int_AMDGPU_txb v4f32:$SRC_GPR, - imm:$RESOURCE_ID, imm:$SAMPLER_ID, TEX_SHADOW:$textureTarget))] ->; -def TEX_SAMPLE_G : R600_TEX < - 0x14, "TEX_SAMPLE_G", - [] ->; -def TEX_SAMPLE_C_G : R600_TEX < - 0x1C, "TEX_SAMPLE_C_G", - [] ->; +def TEX_SAMPLE : R600_TEX <0x10, "TEX_SAMPLE">; +def TEX_SAMPLE_C : R600_TEX <0x18, "TEX_SAMPLE_C">; +def TEX_SAMPLE_L : R600_TEX <0x11, "TEX_SAMPLE_L">; +def TEX_SAMPLE_C_L : R600_TEX <0x19, "TEX_SAMPLE_C_L">; +def TEX_SAMPLE_LB : R600_TEX <0x12, "TEX_SAMPLE_LB">; +def TEX_SAMPLE_C_LB : R600_TEX <0x1A, "TEX_SAMPLE_C_LB">; +def TEX_LD : R600_TEX <0x03, "TEX_LD">; +def TEX_GET_TEXTURE_RESINFO : R600_TEX <0x04, "TEX_GET_TEXTURE_RESINFO">; +def TEX_GET_GRADIENTS_H : R600_TEX <0x07, "TEX_GET_GRADIENTS_H">; +def TEX_GET_GRADIENTS_V : R600_TEX <0x08, "TEX_GET_GRADIENTS_V">; +def TEX_SET_GRADIENTS_H : R600_TEX <0x0B, "TEX_SET_GRADIENTS_H">; +def TEX_SET_GRADIENTS_V : R600_TEX <0x0C, "TEX_SET_GRADIENTS_V">; +def TEX_SAMPLE_G : R600_TEX <0x14, "TEX_SAMPLE_G">; +def TEX_SAMPLE_C_G : R600_TEX <0x1C, "TEX_SAMPLE_C_G">; + +defm : TexPattern<0, TEX_SAMPLE>; +defm : TexPattern<1, TEX_SAMPLE_C>; +defm : TexPattern<2, TEX_SAMPLE_L>; +defm : TexPattern<3, TEX_SAMPLE_C_L>; +defm : TexPattern<4, TEX_SAMPLE_LB>; +defm : TexPattern<5, TEX_SAMPLE_C_LB>; +defm : TexPattern<6, TEX_LD, v4i32>; +defm : TexPattern<7, TEX_GET_TEXTURE_RESINFO, v4i32>; +defm : TexPattern<8, TEX_GET_GRADIENTS_H>; +defm : TexPattern<9, TEX_GET_GRADIENTS_V>; //===----------------------------------------------------------------------===// // Helper classes for common instructions @@ -1249,17 +1231,49 @@ class CNDGE_Common <bits<5> inst> : R600_3OP < [(set f32:$dst, (selectcc f32:$src0, FP_ZERO, f32:$src1, f32:$src2, COND_GE))] >; -multiclass DOT4_Common <bits<11> inst> { - - def _pseudo : R600_REDUCTION <inst, - (ins R600_Reg128:$src0, R600_Reg128:$src1), - "DOT4 $dst $src0, $src1", - [(set f32:$dst, (int_AMDGPU_dp4 v4f32:$src0, v4f32:$src1))] - >; - def _real : R600_2OP <inst, "DOT4", []>; +let isCodeGenOnly = 1, isPseudo = 1, Namespace = "AMDGPU" in { +class R600_VEC2OP<list<dag> pattern> : InstR600 <(outs R600_Reg32:$dst), (ins +// Slot X + UEM:$update_exec_mask_X, UP:$update_pred_X, WRITE:$write_X, + OMOD:$omod_X, REL:$dst_rel_X, CLAMP:$clamp_X, + R600_TReg32_X:$src0_X, NEG:$src0_neg_X, REL:$src0_rel_X, ABS:$src0_abs_X, SEL:$src0_sel_X, + R600_TReg32_X:$src1_X, NEG:$src1_neg_X, REL:$src1_rel_X, ABS:$src1_abs_X, SEL:$src1_sel_X, + R600_Pred:$pred_sel_X, +// Slot Y + UEM:$update_exec_mask_Y, UP:$update_pred_Y, WRITE:$write_Y, + OMOD:$omod_Y, REL:$dst_rel_Y, CLAMP:$clamp_Y, + R600_TReg32_Y:$src0_Y, NEG:$src0_neg_Y, REL:$src0_rel_Y, ABS:$src0_abs_Y, 
SEL:$src0_sel_Y, + R600_TReg32_Y:$src1_Y, NEG:$src1_neg_Y, REL:$src1_rel_Y, ABS:$src1_abs_Y, SEL:$src1_sel_Y, + R600_Pred:$pred_sel_Y, +// Slot Z + UEM:$update_exec_mask_Z, UP:$update_pred_Z, WRITE:$write_Z, + OMOD:$omod_Z, REL:$dst_rel_Z, CLAMP:$clamp_Z, + R600_TReg32_Z:$src0_Z, NEG:$src0_neg_Z, REL:$src0_rel_Z, ABS:$src0_abs_Z, SEL:$src0_sel_Z, + R600_TReg32_Z:$src1_Z, NEG:$src1_neg_Z, REL:$src1_rel_Z, ABS:$src1_abs_Z, SEL:$src1_sel_Z, + R600_Pred:$pred_sel_Z, +// Slot W + UEM:$update_exec_mask_W, UP:$update_pred_W, WRITE:$write_W, + OMOD:$omod_W, REL:$dst_rel_W, CLAMP:$clamp_W, + R600_TReg32_W:$src0_W, NEG:$src0_neg_W, REL:$src0_rel_W, ABS:$src0_abs_W, SEL:$src0_sel_W, + R600_TReg32_W:$src1_W, NEG:$src1_neg_W, REL:$src1_rel_W, ABS:$src1_abs_W, SEL:$src1_sel_W, + R600_Pred:$pred_sel_W, + LITERAL:$literal0, LITERAL:$literal1), + "", + pattern, + AnyALU> {} } +def DOT_4 : R600_VEC2OP<[(set R600_Reg32:$dst, (DOT4 + R600_TReg32_X:$src0_X, R600_TReg32_X:$src1_X, + R600_TReg32_Y:$src0_Y, R600_TReg32_Y:$src1_Y, + R600_TReg32_Z:$src0_Z, R600_TReg32_Z:$src1_Z, + R600_TReg32_W:$src0_W, R600_TReg32_W:$src1_W))]>; + + +class DOT4_Common <bits<11> inst> : R600_2OP <inst, "DOT4", []>; + + let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in { multiclass CUBE_Common <bits<11> inst> { @@ -1432,7 +1446,7 @@ let Predicates = [isR600] in { def CNDE_r600 : CNDE_Common<0x18>; def CNDGT_r600 : CNDGT_Common<0x19>; def CNDGE_r600 : CNDGE_Common<0x1A>; - defm DOT4_r600 : DOT4_Common<0x50>; + def DOT4_r600 : DOT4_Common<0x50>; defm CUBE_r600 : CUBE_Common<0x52>; def EXP_IEEE_r600 : EXP_IEEE_Common<0x61>; def LOG_CLAMPED_r600 : LOG_CLAMPED_Common<0x62>; @@ -1611,14 +1625,13 @@ let Predicates = [isEGorCayman] in { i32:$src2))], VecALU >; + def : BFEPattern <BFE_UINT_eg>; - def BFI_INT_eg : R600_3OP <0x06, "BFI_INT", []>; + def BFI_INT_eg : R600_3OP <0x06, "BFI_INT", [], VecALU>; defm : BFIPatterns <BFI_INT_eg>; - def BIT_ALIGN_INT_eg : R600_3OP <0xC, "BIT_ALIGN_INT", - [(set i32:$dst, (AMDGPUbitalign i32:$src0, i32:$src1, i32:$src2))], - VecALU - >; + def BIT_ALIGN_INT_eg : R600_3OP <0xC, "BIT_ALIGN_INT", [], VecALU>; + def : ROTRPattern <BIT_ALIGN_INT_eg>; def MULADD_eg : MULADD_Common<0x14>; def MULADD_IEEE_eg : MULADD_IEEE_Common<0x18>; @@ -1630,7 +1643,7 @@ let Predicates = [isEGorCayman] in { def CNDGE_eg : CNDGE_Common<0x1B>; def MUL_LIT_eg : MUL_LIT_Common<0x1F>; def LOG_CLAMPED_eg : LOG_CLAMPED_Common<0x82>; - defm DOT4_eg : DOT4_Common<0xBE>; + def DOT4_eg : DOT4_Common<0xBE>; defm CUBE_eg : CUBE_Common<0xC0>; let hasSideEffects = 1 in { @@ -1665,6 +1678,9 @@ let hasSideEffects = 1 in { def : Pat<(fp_to_uint f32:$src0), (FLT_TO_UINT_eg (TRUNC $src0))>; + // SHA-256 Patterns + def : SHA256MaPattern <BFI_INT_eg, XOR_INT>; + def EG_ExportSwz : ExportSwzInst { let Word1{19-16} = 0; // BURST_COUNT let Word1{20} = 1; // VALID_PIXEL_MODE @@ -1743,8 +1759,7 @@ let usesCustomInserter = 1 in { class RAT_WRITE_CACHELESS_eg <dag ins, bits<4> comp_mask, string name, list<dag> pattern> - : EG_CF_RAT <0x57, 0x2, 0, (outs), ins, - !strconcat(name, " $rw_gpr, $index_gpr, $eop"), pattern> { + : EG_CF_RAT <0x57, 0x2, 0, (outs), ins, name, pattern> { let RIM = 0; // XXX: Have a separate instruction for non-indexed writes. 
let TYPE = 1; @@ -1764,19 +1779,19 @@ class RAT_WRITE_CACHELESS_eg <dag ins, bits<4> comp_mask, string name, // 32-bit store def RAT_WRITE_CACHELESS_32_eg : RAT_WRITE_CACHELESS_eg < (ins R600_TReg32_X:$rw_gpr, R600_TReg32_X:$index_gpr, InstFlag:$eop), - 0x1, "RAT_WRITE_CACHELESS_32_eg", + 0x1, "RAT_WRITE_CACHELESS_32_eg $rw_gpr, $index_gpr, $eop", [(global_store i32:$rw_gpr, i32:$index_gpr)] >; //128-bit store def RAT_WRITE_CACHELESS_128_eg : RAT_WRITE_CACHELESS_eg < (ins R600_Reg128:$rw_gpr, R600_TReg32_X:$index_gpr, InstFlag:$eop), - 0xf, "RAT_WRITE_CACHELESS_128", + 0xf, "RAT_WRITE_CACHELESS_128 $rw_gpr.XYZW, $index_gpr, $eop", [(global_store v4i32:$rw_gpr, i32:$index_gpr)] >; class VTX_READ_eg <string name, bits<8> buffer_id, dag outs, list<dag> pattern> - : InstR600ISA <outs, (ins MEMxi:$ptr), name#" $dst, $ptr", pattern>, + : InstR600ISA <outs, (ins MEMxi:$ptr), name, pattern>, VTX_WORD1_GPR, VTX_WORD0 { // Static fields @@ -1831,7 +1846,7 @@ class VTX_READ_eg <string name, bits<8> buffer_id, dag outs, list<dag> pattern> } class VTX_READ_8_eg <bits<8> buffer_id, list<dag> pattern> - : VTX_READ_eg <"VTX_READ_8", buffer_id, (outs R600_TReg32_X:$dst), + : VTX_READ_eg <"VTX_READ_8 $dst, $ptr", buffer_id, (outs R600_TReg32_X:$dst), pattern> { let MEGA_FETCH_COUNT = 1; @@ -1843,7 +1858,7 @@ class VTX_READ_8_eg <bits<8> buffer_id, list<dag> pattern> } class VTX_READ_16_eg <bits<8> buffer_id, list<dag> pattern> - : VTX_READ_eg <"VTX_READ_16", buffer_id, (outs R600_TReg32_X:$dst), + : VTX_READ_eg <"VTX_READ_16 $dst, $ptr", buffer_id, (outs R600_TReg32_X:$dst), pattern> { let MEGA_FETCH_COUNT = 2; let DST_SEL_X = 0; @@ -1855,7 +1870,7 @@ class VTX_READ_16_eg <bits<8> buffer_id, list<dag> pattern> } class VTX_READ_32_eg <bits<8> buffer_id, list<dag> pattern> - : VTX_READ_eg <"VTX_READ_32", buffer_id, (outs R600_TReg32_X:$dst), + : VTX_READ_eg <"VTX_READ_32 $dst, $ptr", buffer_id, (outs R600_TReg32_X:$dst), pattern> { let MEGA_FETCH_COUNT = 4; @@ -1876,7 +1891,7 @@ class VTX_READ_32_eg <bits<8> buffer_id, list<dag> pattern> } class VTX_READ_128_eg <bits<8> buffer_id, list<dag> pattern> - : VTX_READ_eg <"VTX_READ_128", buffer_id, (outs R600_Reg128:$dst), + : VTX_READ_eg <"VTX_READ_128 $dst.XYZW, $ptr", buffer_id, (outs R600_Reg128:$dst), pattern> { let MEGA_FETCH_COUNT = 16; diff --git a/lib/Target/R600/R600Intrinsics.td b/lib/Target/R600/R600Intrinsics.td index dc8980a..58d86b6 100644 --- a/lib/Target/R600/R600Intrinsics.td +++ b/lib/Target/R600/R600Intrinsics.td @@ -12,12 +12,49 @@ //===----------------------------------------------------------------------===// let TargetPrefix = "R600", isTarget = 1 in { + class TextureIntrinsicFloatInput : + Intrinsic<[llvm_v4f32_ty], [ + llvm_v4f32_ty, // Coord + llvm_i32_ty, // offset_x + llvm_i32_ty, // offset_y, + llvm_i32_ty, // offset_z, + llvm_i32_ty, // resource_id + llvm_i32_ty, // samplerid + llvm_i32_ty, // coord_type_x + llvm_i32_ty, // coord_type_y + llvm_i32_ty, // coord_type_z + llvm_i32_ty // coord_type_w + ], [IntrNoMem]>; + class TextureIntrinsicInt32Input : + Intrinsic<[llvm_v4i32_ty], [ + llvm_v4i32_ty, // Coord + llvm_i32_ty, // offset_x + llvm_i32_ty, // offset_y, + llvm_i32_ty, // offset_z, + llvm_i32_ty, // resource_id + llvm_i32_ty, // samplerid + llvm_i32_ty, // coord_type_x + llvm_i32_ty, // coord_type_y + llvm_i32_ty, // coord_type_z + llvm_i32_ty // coord_type_w + ], [IntrNoMem]>; + def int_R600_load_input : Intrinsic<[llvm_float_ty], [llvm_i32_ty], [IntrNoMem]>; def int_R600_interp_input : Intrinsic<[llvm_float_ty], [llvm_i32_ty, 
llvm_i32_ty], [IntrNoMem]>; def int_R600_load_texbuf : Intrinsic<[llvm_v4f32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>; + def int_R600_tex : TextureIntrinsicFloatInput; + def int_R600_texc : TextureIntrinsicFloatInput; + def int_R600_txl : TextureIntrinsicFloatInput; + def int_R600_txlc : TextureIntrinsicFloatInput; + def int_R600_txb : TextureIntrinsicFloatInput; + def int_R600_txbc : TextureIntrinsicFloatInput; + def int_R600_txf : TextureIntrinsicInt32Input; + def int_R600_txq : TextureIntrinsicInt32Input; + def int_R600_ddx : TextureIntrinsicFloatInput; + def int_R600_ddy : TextureIntrinsicFloatInput; def int_R600_store_swizzle : Intrinsic<[], [llvm_v4f32_ty, llvm_i32_ty, llvm_i32_ty], []>; def int_R600_store_stream_output : diff --git a/lib/Target/R600/R600MachineFunctionInfo.h b/lib/Target/R600/R600MachineFunctionInfo.h index 70fddbb..f23d9b7 100644 --- a/lib/Target/R600/R600MachineFunctionInfo.h +++ b/lib/Target/R600/R600MachineFunctionInfo.h @@ -13,9 +13,9 @@ #ifndef R600MACHINEFUNCTIONINFO_H #define R600MACHINEFUNCTIONINFO_H +#include "AMDGPUMachineFunction.h" #include "llvm/ADT/BitVector.h" #include "llvm/CodeGen/SelectionDAG.h" -#include "AMDGPUMachineFunction.h" #include <vector> namespace llvm { diff --git a/lib/Target/R600/R600MachineScheduler.cpp b/lib/Target/R600/R600MachineScheduler.cpp index a777142..a330d88 100644 --- a/lib/Target/R600/R600MachineScheduler.cpp +++ b/lib/Target/R600/R600MachineScheduler.cpp @@ -16,12 +16,11 @@ #define DEBUG_TYPE "misched" #include "R600MachineScheduler.h" -#include "llvm/CodeGen/MachineRegisterInfo.h" #include "llvm/CodeGen/LiveIntervalAnalysis.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" #include "llvm/Pass.h" #include "llvm/PassManager.h" #include "llvm/Support/raw_ostream.h" -#include <set> using namespace llvm; @@ -31,53 +30,87 @@ void R600SchedStrategy::initialize(ScheduleDAGMI *dag) { TII = static_cast<const R600InstrInfo*>(DAG->TII); TRI = static_cast<const R600RegisterInfo*>(DAG->TRI); MRI = &DAG->MRI; - Available[IDAlu]->clear(); - Available[IDFetch]->clear(); - Available[IDOther]->clear(); CurInstKind = IDOther; CurEmitted = 0; OccupedSlotsMask = 15; InstKindLimit[IDAlu] = TII->getMaxAlusPerClause(); - + InstKindLimit[IDOther] = 32; const AMDGPUSubtarget &ST = DAG->TM.getSubtarget<AMDGPUSubtarget>(); - if (ST.device()->getGeneration() <= AMDGPUDeviceInfo::HD5XXX) { - InstKindLimit[IDFetch] = 7; // 8 minus 1 for security - } else { - InstKindLimit[IDFetch] = 15; // 16 minus 1 for security - } + InstKindLimit[IDFetch] = ST.getTexVTXClauseSize(); + AluInstCount = 0; + FetchInstCount = 0; } -void R600SchedStrategy::MoveUnits(ReadyQueue *QSrc, ReadyQueue *QDst) +void R600SchedStrategy::MoveUnits(std::vector<SUnit *> &QSrc, + std::vector<SUnit *> &QDst) { - if (QSrc->empty()) - return; - for (ReadyQueue::iterator I = QSrc->begin(), - E = QSrc->end(); I != E; ++I) { - (*I)->NodeQueueId &= ~QSrc->getID(); - QDst->push(*I); - } - QSrc->clear(); + QDst.insert(QDst.end(), QSrc.begin(), QSrc.end()); + QSrc.clear(); +} + +static +unsigned getWFCountLimitedByGPR(unsigned GPRCount) { + assert (GPRCount && "GPRCount cannot be 0"); + return 248 / GPRCount; } SUnit* R600SchedStrategy::pickNode(bool &IsTopNode) { SUnit *SU = 0; - IsTopNode = true; NextInstKind = IDOther; + IsTopNode = false; + // check if we might want to switch current clause type - bool AllowSwitchToAlu = (CurInstKind == IDOther) || - (CurEmitted > InstKindLimit[CurInstKind]) || - (Available[CurInstKind]->empty()); - bool AllowSwitchFromAlu = (CurEmitted > 
InstKindLimit[CurInstKind]) && - (!Available[IDFetch]->empty() || !Available[IDOther]->empty()); - - if ((AllowSwitchToAlu && CurInstKind != IDAlu) || - (!AllowSwitchFromAlu && CurInstKind == IDAlu)) { + bool AllowSwitchToAlu = (CurEmitted >= InstKindLimit[CurInstKind]) || + (Available[CurInstKind].empty()); + bool AllowSwitchFromAlu = (CurEmitted >= InstKindLimit[CurInstKind]) && + (!Available[IDFetch].empty() || !Available[IDOther].empty()); + + if (CurInstKind == IDAlu && !Available[IDFetch].empty()) { + // We use the heuristic provided by the AMD Accelerated Parallel Processing + // OpenCL Programming Guide: + // The approx. number of WF that allows TEX inst to hide ALU inst is: + // 500 (cycles for TEX) / (AluFetchRatio * 8 (cycles for ALU)), + // i.e. 62.5 / AluFetchRatio, hence the 62.5f constant below. + float ALUFetchRationEstimate = + (AluInstCount + AvailablesAluCount() + Pending[IDAlu].size()) / + (FetchInstCount + Available[IDFetch].size()); + unsigned NeededWF = 62.5f / ALUFetchRationEstimate; + DEBUG( dbgs() << NeededWF << " approx. Wavefronts Required\n" ); + // We assume the local GPR requirements to be "dominated" by the requirement + // of the TEX clause (which consumes 128-bit regs); ALU inst before and + // after TEX are indeed likely to consume or generate values from/for the + // TEX clause. + // Available[IDFetch].size() * 2 : GPRs required in the Fetch clause + // We assume that fetch instructions are either TnXYZW = TEX TnXYZW (need + // one GPR) or TmXYZW = TnXYZW (need 2 GPRs). + // (TODO: use RegisterPressure) + // If we are going to use too many GPRs, we flush Fetch instructions to lower + // register pressure on 128-bit regs. + unsigned NearRegisterRequirement = 2 * Available[IDFetch].size(); + if (NeededWF > getWFCountLimitedByGPR(NearRegisterRequirement)) + AllowSwitchFromAlu = true; + } + + + // We want to schedule AR defs as soon as possible to make sure they aren't + // put in a different ALU clause from their uses. + if (!SU && !UnscheduledARDefs.empty()) { + SU = UnscheduledARDefs[0]; + UnscheduledARDefs.erase(UnscheduledARDefs.begin()); + NextInstKind = IDAlu; + } + + if (!SU && ((AllowSwitchToAlu && CurInstKind != IDAlu) || + (!AllowSwitchFromAlu && CurInstKind == IDAlu))) { // try to pick ALU SU = pickAlu(); + if (!SU && !PhysicalRegCopy.empty()) { + SU = PhysicalRegCopy.front(); + PhysicalRegCopy.erase(PhysicalRegCopy.begin()); + } if (SU) { - if (CurEmitted > InstKindLimit[IDAlu]) + if (CurEmitted >= InstKindLimit[IDAlu]) CurEmitted = 0; NextInstKind = IDAlu; } @@ -97,16 +130,21 @@ SUnit* R600SchedStrategy::pickNode(bool &IsTopNode) { NextInstKind = IDOther; } + // We want to schedule the AR uses as late as possible to make sure that + // the AR defs have been released.
+ if (!SU && !UnscheduledARUses.empty()) { + SU = UnscheduledARUses[0]; + UnscheduledARUses.erase(UnscheduledARUses.begin()); + NextInstKind = IDAlu; + } + + DEBUG( if (SU) { - dbgs() << "picked node: "; + dbgs() << " ** Pick node **\n"; SU->dump(DAG); } else { - dbgs() << "NO NODE "; - for (int i = 0; i < IDLast; ++i) { - Available[i]->dump(); - Pending[i]->dump(); - } + dbgs() << "NO NODE \n"; for (unsigned i = 0; i < DAG->SUnits.size(); i++) { const SUnit &S = DAG->SUnits[i]; if (!S.isScheduled) @@ -119,10 +157,6 @@ SUnit* R600SchedStrategy::pickNode(bool &IsTopNode) { } void R600SchedStrategy::schedNode(SUnit *SU, bool IsTopNode) { - - DEBUG(dbgs() << "scheduled: "); - DEBUG(SU->dump(DAG)); - if (NextInstKind != CurInstKind) { DEBUG(dbgs() << "Instruction Type Switch\n"); if (NextInstKind != IDAlu) @@ -132,6 +166,7 @@ void R600SchedStrategy::schedNode(SUnit *SU, bool IsTopNode) { } if (CurInstKind == IDAlu) { + AluInstCount ++; switch (getAluKind(SU)) { case AluT_XYZW: CurEmitted += 4; @@ -157,20 +192,51 @@ void R600SchedStrategy::schedNode(SUnit *SU, bool IsTopNode) { if (CurInstKind != IDFetch) { MoveUnits(Pending[IDFetch], Available[IDFetch]); - } - MoveUnits(Pending[IDOther], Available[IDOther]); + } else + FetchInstCount++; } -void R600SchedStrategy::releaseTopNode(SUnit *SU) { - int IK = getInstKind(SU); +static bool +isPhysicalRegCopy(MachineInstr *MI) { + if (MI->getOpcode() != AMDGPU::COPY) + return false; - DEBUG(dbgs() << IK << " <= "); - DEBUG(SU->dump(DAG)); + return !TargetRegisterInfo::isVirtualRegister(MI->getOperand(1).getReg()); +} - Pending[IK]->push(SU); +void R600SchedStrategy::releaseTopNode(SUnit *SU) { + DEBUG(dbgs() << "Top Releasing ";SU->dump(DAG);); } void R600SchedStrategy::releaseBottomNode(SUnit *SU) { + DEBUG(dbgs() << "Bottom Releasing ";SU->dump(DAG);); + if (isPhysicalRegCopy(SU->getInstr())) { + PhysicalRegCopy.push_back(SU); + return; + } + + int IK = getInstKind(SU); + + // Check for AR register defines + for (MachineInstr::const_mop_iterator I = SU->getInstr()->operands_begin(), + E = SU->getInstr()->operands_end(); + I != E; ++I) { + if (I->isReg() && I->getReg() == AMDGPU::AR_X) { + if (I->isDef()) { + UnscheduledARDefs.push_back(SU); + } else { + UnscheduledARUses.push_back(SU); + } + return; + } + } + + // There is no export clause; we can schedule one as soon as it's ready + if (IK == IDOther) + Available[IDOther].push_back(SU); + else + Pending[IK].push_back(SU); + } bool R600SchedStrategy::regBelongsToClass(unsigned Reg, @@ -186,17 +252,15 @@ R600SchedStrategy::AluKind R600SchedStrategy::getAluKind(SUnit *SU) const { MachineInstr *MI = SU->getInstr(); switch (MI->getOpcode()) { + case AMDGPU::PRED_X: + return AluPredX; case AMDGPU::INTERP_PAIR_XY: case AMDGPU::INTERP_PAIR_ZW: case AMDGPU::INTERP_VEC_LOAD: + case AMDGPU::DOT_4: return AluT_XYZW; case AMDGPU::COPY: - if (TargetRegisterInfo::isPhysicalRegister(MI->getOperand(1).getReg())) { - // %vregX = COPY Tn_X is likely to be discarded in favor of an - // assignement of Tn_X to %vregX, don't considers it in scheduling - return AluDiscarded; - } - else if (MI->getOperand(1).isUndef()) { + if (MI->getOperand(1).isUndef()) { // MI will become a KILL, don't considers it in scheduling return AluDiscarded; } @@ -246,57 +310,37 @@ R600SchedStrategy::AluKind R600SchedStrategy::getAluKind(SUnit *SU) const { int R600SchedStrategy::getInstKind(SUnit* SU) { int Opcode = SU->getInstr()->getOpcode(); + if (TII->usesTextureCache(Opcode) || TII->usesVertexCache(Opcode)) + return IDFetch; + if
(TII->isALUInstr(Opcode)) { return IDAlu; } switch (Opcode) { + case AMDGPU::PRED_X: case AMDGPU::COPY: case AMDGPU::CONST_COPY: case AMDGPU::INTERP_PAIR_XY: case AMDGPU::INTERP_PAIR_ZW: case AMDGPU::INTERP_VEC_LOAD: - case AMDGPU::DOT4_eg_pseudo: - case AMDGPU::DOT4_r600_pseudo: + case AMDGPU::DOT_4: return IDAlu; - case AMDGPU::TEX_VTX_CONSTBUF: - case AMDGPU::TEX_VTX_TEXBUF: - case AMDGPU::TEX_LD: - case AMDGPU::TEX_GET_TEXTURE_RESINFO: - case AMDGPU::TEX_GET_GRADIENTS_H: - case AMDGPU::TEX_GET_GRADIENTS_V: - case AMDGPU::TEX_SET_GRADIENTS_H: - case AMDGPU::TEX_SET_GRADIENTS_V: - case AMDGPU::TEX_SAMPLE: - case AMDGPU::TEX_SAMPLE_C: - case AMDGPU::TEX_SAMPLE_L: - case AMDGPU::TEX_SAMPLE_C_L: - case AMDGPU::TEX_SAMPLE_LB: - case AMDGPU::TEX_SAMPLE_C_LB: - case AMDGPU::TEX_SAMPLE_G: - case AMDGPU::TEX_SAMPLE_C_G: - case AMDGPU::TXD: - case AMDGPU::TXD_SHADOW: - return IDFetch; default: - DEBUG( - dbgs() << "other inst: "; - SU->dump(DAG); - ); return IDOther; } } -SUnit *R600SchedStrategy::PopInst(std::multiset<SUnit *, CompareSUnit> &Q) { +SUnit *R600SchedStrategy::PopInst(std::vector<SUnit *> &Q) { if (Q.empty()) return NULL; - for (std::set<SUnit *, CompareSUnit>::iterator It = Q.begin(), E = Q.end(); + for (std::vector<SUnit *>::reverse_iterator It = Q.rbegin(), E = Q.rend(); It != E; ++It) { SUnit *SU = *It; InstructionsGroupCandidate.push_back(SU->getInstr()); if (TII->canBundle(InstructionsGroupCandidate)) { InstructionsGroupCandidate.pop_back(); - Q.erase(It); + Q.erase((It + 1).base()); return SU; } else { InstructionsGroupCandidate.pop_back(); @@ -306,14 +350,12 @@ SUnit *R600SchedStrategy::PopInst(std::multiset<SUnit *, CompareSUnit> &Q) { } void R600SchedStrategy::LoadAlu() { - ReadyQueue *QSrc = Pending[IDAlu]; - for (ReadyQueue::iterator I = QSrc->begin(), - E = QSrc->end(); I != E; ++I) { - (*I)->NodeQueueId &= ~QSrc->getID(); - AluKind AK = getAluKind(*I); - AvailableAlus[AK].insert(*I); - } - QSrc->clear(); + std::vector<SUnit *> &QSrc = Pending[IDAlu]; + for (unsigned i = 0, e = QSrc.size(); i < e; ++i) { + AluKind AK = getAluKind(QSrc[i]); + AvailableAlus[AK].push_back(QSrc[i]); + } + QSrc.clear(); } void R600SchedStrategy::PrepareNextSlot() { @@ -355,35 +397,29 @@ void R600SchedStrategy::AssignSlot(MachineInstr* MI, unsigned Slot) { SUnit *R600SchedStrategy::AttemptFillSlot(unsigned Slot) { static const AluKind IndexToID[] = {AluT_X, AluT_Y, AluT_Z, AluT_W}; SUnit *SlotedSU = PopInst(AvailableAlus[IndexToID[Slot]]); - SUnit *UnslotedSU = PopInst(AvailableAlus[AluAny]); - if (!UnslotedSU) { + if (SlotedSU) return SlotedSU; - } else if (!SlotedSU) { + SUnit *UnslotedSU = PopInst(AvailableAlus[AluAny]); + if (UnslotedSU) AssignSlot(UnslotedSU->getInstr(), Slot); - return UnslotedSU; - } else { - //Determine which one to pick (the lesser one) - if (CompareSUnit()(SlotedSU, UnslotedSU)) { - AvailableAlus[AluAny].insert(UnslotedSU); - return SlotedSU; - } else { - AvailableAlus[IndexToID[Slot]].insert(SlotedSU); - AssignSlot(UnslotedSU->getInstr(), Slot); - return UnslotedSU; - } - } + return UnslotedSU; } -bool R600SchedStrategy::isAvailablesAluEmpty() const { - return Pending[IDAlu]->empty() && AvailableAlus[AluAny].empty() && - AvailableAlus[AluT_XYZW].empty() && AvailableAlus[AluT_X].empty() && - AvailableAlus[AluT_Y].empty() && AvailableAlus[AluT_Z].empty() && - AvailableAlus[AluT_W].empty() && AvailableAlus[AluDiscarded].empty(); +unsigned R600SchedStrategy::AvailablesAluCount() const { + return AvailableAlus[AluAny].size() + AvailableAlus[AluT_XYZW].size() + + 
AvailableAlus[AluT_X].size() + AvailableAlus[AluT_Y].size() + + AvailableAlus[AluT_Z].size() + AvailableAlus[AluT_W].size() + + AvailableAlus[AluDiscarded].size() + AvailableAlus[AluPredX].size(); } SUnit* R600SchedStrategy::pickAlu() { - while (!isAvailablesAluEmpty()) { + while (AvailablesAluCount() || !Pending[IDAlu].empty()) { if (!OccupedSlotsMask) { + // Bottom-up scheduling: predX must come first + if (!AvailableAlus[AluPredX].empty()) { + OccupedSlotsMask = 15; + return PopInst(AvailableAlus[AluPredX]); + } // Flush physical reg copies (RA will discard them) if (!AvailableAlus[AluDiscarded].empty()) { OccupedSlotsMask = 15; @@ -395,7 +431,7 @@ SUnit* R600SchedStrategy::pickAlu() { return PopInst(AvailableAlus[AluT_XYZW]); } } - for (unsigned Chan = 0; Chan < 4; ++Chan) { + for (int Chan = 3; Chan > -1; --Chan) { bool isOccupied = OccupedSlotsMask & (1 << Chan); if (!isOccupied) { SUnit *SU = AttemptFillSlot(Chan); @@ -413,14 +449,14 @@ SUnit* R600SchedStrategy::pickAlu() { SUnit* R600SchedStrategy::pickOther(int QID) { SUnit *SU = 0; - ReadyQueue *AQ = Available[QID]; + std::vector<SUnit *> &AQ = Available[QID]; - if (AQ->empty()) { + if (AQ.empty()) { MoveUnits(Pending[QID], AQ); } - if (!AQ->empty()) { - SU = *AQ->begin(); - AQ->remove(AQ->begin()); + if (!AQ.empty()) { + SU = AQ.back(); + AQ.resize(AQ.size() - 1); } return SU; } diff --git a/lib/Target/R600/R600MachineScheduler.h b/lib/Target/R600/R600MachineScheduler.h index 3d0367f..aae8b3f 100644 --- a/lib/Target/R600/R600MachineScheduler.h +++ b/lib/Target/R600/R600MachineScheduler.h @@ -16,21 +16,14 @@ #define R600MACHINESCHEDULER_H_ #include "R600InstrInfo.h" +#include "llvm/ADT/PriorityQueue.h" #include "llvm/CodeGen/MachineScheduler.h" #include "llvm/Support/Debug.h" -#include "llvm/ADT/PriorityQueue.h" using namespace llvm; namespace llvm { -class CompareSUnit { -public: - bool operator()(const SUnit *S1, const SUnit *S2) { - return S1->getDepth() > S2->getDepth(); - } -}; - class R600SchedStrategy : public MachineSchedStrategy { const ScheduleDAGMI *DAG; @@ -38,12 +31,6 @@ class R600SchedStrategy : public MachineSchedStrategy { const R600RegisterInfo *TRI; MachineRegisterInfo *MRI; - enum InstQueue { - QAlu = 1, - QFetch = 2, - QOther = 4 - }; - enum InstKind { IDAlu, IDFetch, @@ -58,17 +45,24 @@ AluT_Z, AluT_W, AluT_XYZW, + AluPredX, AluDiscarded, // LLVM Instructions that are going to be eliminated AluLast }; - ReadyQueue *Available[IDLast], *Pending[IDLast]; - std::multiset<SUnit *, CompareSUnit> AvailableAlus[AluLast]; + std::vector<SUnit *> Available[IDLast], Pending[IDLast]; + std::vector<SUnit *> AvailableAlus[AluLast]; + std::vector<SUnit *> UnscheduledARDefs; + std::vector<SUnit *> UnscheduledARUses; + std::vector<SUnit *> PhysicalRegCopy; InstKind CurInstKind; int CurEmitted; InstKind NextInstKind; + unsigned AluInstCount; + unsigned FetchInstCount; + int InstKindLimit[IDLast]; int OccupedSlotsMask; @@ -76,19 +70,9 @@ class R600SchedStrategy : public MachineSchedStrategy { public: R600SchedStrategy() : DAG(0), TII(0), TRI(0), MRI(0) { - Available[IDAlu] = new ReadyQueue(QAlu, "AAlu"); - Available[IDFetch] = new ReadyQueue(QFetch, "AFetch"); - Available[IDOther] = new ReadyQueue(QOther, "AOther"); - Pending[IDAlu] = new ReadyQueue(QAlu<<4, "PAlu"); - Pending[IDFetch] = new ReadyQueue(QFetch<<4, "PFetch"); - Pending[IDOther] = new ReadyQueue(QOther<<4, "POther"); } virtual ~R600SchedStrategy() { - for (unsigned I = 0; I < IDLast; ++I) { - delete
Available[I]; - delete Pending[I]; - } } virtual void initialize(ScheduleDAGMI *dag); @@ -104,15 +88,15 @@ private: bool regBelongsToClass(unsigned Reg, const TargetRegisterClass *RC) const; AluKind getAluKind(SUnit *SU) const; void LoadAlu(); - bool isAvailablesAluEmpty() const; + unsigned AvailablesAluCount() const; SUnit *AttemptFillSlot (unsigned Slot); void PrepareNextSlot(); - SUnit *PopInst(std::multiset<SUnit *, CompareSUnit> &Q); + SUnit *PopInst(std::vector<SUnit*> &Q); void AssignSlot(MachineInstr *MI, unsigned Slot); SUnit* pickAlu(); SUnit* pickOther(int QID); - void MoveUnits(ReadyQueue *QSrc, ReadyQueue *QDst); + void MoveUnits(std::vector<SUnit *> &QSrc, std::vector<SUnit *> &QDst); }; } // namespace llvm diff --git a/lib/Target/R600/R600OptimizeVectorRegisters.cpp b/lib/Target/R600/R600OptimizeVectorRegisters.cpp new file mode 100644 index 0000000..4636426 --- /dev/null +++ b/lib/Target/R600/R600OptimizeVectorRegisters.cpp @@ -0,0 +1,372 @@ +//===--------------------- R600MergeVectorRegisters.cpp -------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +/// \file +/// This pass merges inputs of swizzleable instructions into vectors that share +/// common data and/or have enough undef subregs, using swizzle abilities. +/// +/// For instance let's consider the following pseudo code: +/// vreg5<def> = REG_SEQ vreg1, sub0, vreg2, sub1, vreg3, sub2, undef, sub3 +/// ... +/// vreg7<def> = REG_SEQ vreg1, sub0, vreg3, sub1, undef, sub2, vreg4, sub3 +/// (swizzleable Inst) vreg7, SwizzleMask : sub0, sub1, sub2, sub3 +/// +/// is turned into: +/// vreg5<def> = REG_SEQ vreg1, sub0, vreg2, sub1, vreg3, sub2, undef, sub3 +/// ... +/// vreg7<def> = INSERT_SUBREG vreg4, sub3 +/// (swizzleable Inst) vreg7, SwizzleMask : sub0, sub2, sub1, sub3 +/// +/// This allows regalloc to reduce register pressure for vector registers and +/// to reduce MOV count.
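
The merge described above boils down to a channel-matching step: every source of the second REG_SEQ either keeps the channel it already occupies in the first vector or is assigned one of that vector's undef channels; if neither is possible the merge fails. A small self-contained C++ sketch of that step (the RegToChan/UndefReg names follow the pass below; the container types and helper are simplified assumptions, not the pass's actual code):

#include <cassert>
#include <map>
#include <utility>
#include <vector>

typedef std::map<unsigned, unsigned> RegToChanTy;            // vreg -> chan
typedef std::vector<std::pair<unsigned, unsigned> > RemapTy; // (old, new) chan

// Assign each source of ToMerge a channel in the merged vector: reuse the
// channel it already occupies in Base, or take one of Base's undef channels.
static bool mergeChannels(const RegToChanTy &Base,
                          const std::vector<unsigned> &BaseUndef,
                          const RegToChanTy &ToMerge, RemapTy &Remap) {
  unsigned NextUndef = 0;
  for (RegToChanTy::const_iterator It = ToMerge.begin(), E = ToMerge.end();
       It != E; ++It) {
    RegToChanTy::const_iterator Pos = Base.find(It->first);
    if (Pos != Base.end()) {
      Remap.push_back(std::make_pair(It->second, Pos->second));
      continue;
    }
    if (NextUndef >= BaseUndef.size())
      return false; // no undef lane left: the two vectors cannot be merged
    Remap.push_back(std::make_pair(It->second, BaseUndef[NextUndef++]));
  }
  return true;
}

int main() {
  // vreg5 = REG_SEQ vreg1, sub0, vreg2, sub1, vreg3, sub2, undef, sub3
  RegToChanTy Base;
  Base[1] = 0; Base[2] = 1; Base[3] = 2;
  std::vector<unsigned> BaseUndef(1, 3);
  // vreg7 = REG_SEQ vreg1, sub0, vreg3, sub1, undef, sub2, vreg4, sub3
  RegToChanTy ToMerge;
  ToMerge[1] = 0; ToMerge[3] = 1; ToMerge[4] = 3;
  RemapTy Remap;
  assert(mergeChannels(Base, BaseUndef, ToMerge, Remap));
  // vreg1 keeps sub0, vreg3 moves sub1 -> sub2, and vreg4 lands in the undef
  // sub3: exactly the SwizzleMask rewrite shown in the example above.
  (void)Remap;
  return 0;
}
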
+//===----------------------------------------------------------------------===// + +#define DEBUG_TYPE "vec-merger" +#include "llvm/Support/Debug.h" +#include "AMDGPU.h" +#include "R600InstrInfo.h" +#include "llvm/CodeGen/DFAPacketizer.h" +#include "llvm/CodeGen/MachineDominators.h" +#include "llvm/CodeGen/MachineFunctionPass.h" +#include "llvm/CodeGen/MachineLoopInfo.h" +#include "llvm/CodeGen/Passes.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" +#include "llvm/Support/raw_ostream.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" + +using namespace llvm; + +namespace { + +static bool +isImplicitlyDef(MachineRegisterInfo &MRI, unsigned Reg) { + for (MachineRegisterInfo::def_iterator It = MRI.def_begin(Reg), + E = MRI.def_end(); It != E; ++It) { + return (*It).isImplicitDef(); + } + llvm_unreachable("Reg without a def"); + return false; +} + +class RegSeqInfo { +public: + MachineInstr *Instr; + DenseMap<unsigned, unsigned> RegToChan; + std::vector<unsigned> UndefReg; + RegSeqInfo(MachineRegisterInfo &MRI, MachineInstr *MI) : Instr(MI) { + assert (MI->getOpcode() == AMDGPU::REG_SEQUENCE); + for (unsigned i = 1, e = Instr->getNumOperands(); i < e; i+=2) { + MachineOperand &MO = Instr->getOperand(i); + unsigned Chan = Instr->getOperand(i + 1).getImm(); + if (isImplicitlyDef(MRI, MO.getReg())) + UndefReg.push_back(Chan); + else + RegToChan[MO.getReg()] = Chan; + } + } + RegSeqInfo() {} + + bool operator==(const RegSeqInfo &RSI) const { + return RSI.Instr == Instr; + } +}; + +class R600VectorRegMerger : public MachineFunctionPass { +private: + MachineRegisterInfo *MRI; + const R600InstrInfo *TII; + bool canSwizzle(const MachineInstr &) const; + bool areAllUsesSwizzeable(unsigned Reg) const; + void SwizzleInput(MachineInstr &, + const std::vector<std::pair<unsigned, unsigned> > &) const; + bool tryMergeVector(const RegSeqInfo *, RegSeqInfo *, + std::vector<std::pair<unsigned, unsigned> > &Remap) const; + bool tryMergeUsingCommonSlot(RegSeqInfo &RSI, RegSeqInfo &CompatibleRSI, + std::vector<std::pair<unsigned, unsigned> > &RemapChan); + bool tryMergeUsingFreeSlot(RegSeqInfo &RSI, RegSeqInfo &CompatibleRSI, + std::vector<std::pair<unsigned, unsigned> > &RemapChan); + MachineInstr *RebuildVector(RegSeqInfo *MI, + const RegSeqInfo *BaseVec, + const std::vector<std::pair<unsigned, unsigned> > &RemapChan) const; + void RemoveMI(MachineInstr *); + void trackRSI(const RegSeqInfo &RSI); + + typedef DenseMap<unsigned, std::vector<MachineInstr *> > InstructionSetMap; + DenseMap<MachineInstr *, RegSeqInfo> PreviousRegSeq; + InstructionSetMap PreviousRegSeqByReg; + InstructionSetMap PreviousRegSeqByUndefCount; +public: + static char ID; + R600VectorRegMerger(TargetMachine &tm) : MachineFunctionPass(ID), + TII(0) { } + + void getAnalysisUsage(AnalysisUsage &AU) const { + AU.setPreservesCFG(); + AU.addRequired<MachineDominatorTree>(); + AU.addPreserved<MachineDominatorTree>(); + AU.addRequired<MachineLoopInfo>(); + AU.addPreserved<MachineLoopInfo>(); + MachineFunctionPass::getAnalysisUsage(AU); + } + + const char *getPassName() const { + return "R600 Vector Registers Merge Pass"; + } + + bool runOnMachineFunction(MachineFunction &Fn); +}; + +char R600VectorRegMerger::ID = 0; + +bool R600VectorRegMerger::canSwizzle(const MachineInstr &MI) + const { + if (TII->get(MI.getOpcode()).TSFlags & R600_InstFlag::TEX_INST) + return true; + switch (MI.getOpcode()) { + case AMDGPU::R600_ExportSwz: + case AMDGPU::EG_ExportSwz: + return true; + default: + return false; + } +} + +bool 
R600VectorRegMerger::tryMergeVector(const RegSeqInfo *Untouched, + RegSeqInfo *ToMerge, std::vector< std::pair<unsigned, unsigned> > &Remap) + const { + unsigned CurrentUndexIdx = 0; + for (DenseMap<unsigned, unsigned>::iterator It = ToMerge->RegToChan.begin(), + E = ToMerge->RegToChan.end(); It != E; ++It) { + DenseMap<unsigned, unsigned>::const_iterator PosInUntouched = + Untouched->RegToChan.find((*It).first); + if (PosInUntouched != Untouched->RegToChan.end()) { + Remap.push_back(std::pair<unsigned, unsigned> + ((*It).second, (*PosInUntouched).second)); + continue; + } + if (CurrentUndexIdx >= Untouched->UndefReg.size()) + return false; + Remap.push_back(std::pair<unsigned, unsigned> + ((*It).second, Untouched->UndefReg[CurrentUndexIdx++])); + } + + return true; +} + +static +unsigned getReassignedChan( + const std::vector<std::pair<unsigned, unsigned> > &RemapChan, + unsigned Chan) { + for (unsigned j = 0, je = RemapChan.size(); j < je; j++) { + if (RemapChan[j].first == Chan) + return RemapChan[j].second; + } + llvm_unreachable("Chan wasn't reassigned"); +} + +MachineInstr *R600VectorRegMerger::RebuildVector( + RegSeqInfo *RSI, const RegSeqInfo *BaseRSI, + const std::vector<std::pair<unsigned, unsigned> > &RemapChan) const { + unsigned Reg = RSI->Instr->getOperand(0).getReg(); + MachineBasicBlock::iterator Pos = RSI->Instr; + MachineBasicBlock &MBB = *Pos->getParent(); + DebugLoc DL = Pos->getDebugLoc(); + + unsigned SrcVec = BaseRSI->Instr->getOperand(0).getReg(); + DenseMap<unsigned, unsigned> UpdatedRegToChan = BaseRSI->RegToChan; + std::vector<unsigned> UpdatedUndef = BaseRSI->UndefReg; + for (DenseMap<unsigned, unsigned>::iterator It = RSI->RegToChan.begin(), + E = RSI->RegToChan.end(); It != E; ++It) { + if (BaseRSI->RegToChan.find((*It).first) != BaseRSI->RegToChan.end()) { + UpdatedRegToChan[(*It).first] = (*It).second; + continue; + } + unsigned DstReg = MRI->createVirtualRegister(&AMDGPU::R600_Reg128RegClass); + unsigned SubReg = (*It).first; + unsigned Swizzle = (*It).second; + unsigned Chan = getReassignedChan(RemapChan, Swizzle); + + MachineInstr *Tmp = BuildMI(MBB, Pos, DL, TII->get(AMDGPU::INSERT_SUBREG), + DstReg) + .addReg(SrcVec) + .addReg(SubReg) + .addImm(Chan); + UpdatedRegToChan[SubReg] = Chan; + std::vector<unsigned>::iterator ChanPos = + std::find(UpdatedUndef.begin(), UpdatedUndef.end(), Chan); + if (ChanPos != UpdatedUndef.end()) + UpdatedUndef.erase(ChanPos); + assert(std::find(UpdatedUndef.begin(), UpdatedUndef.end(), Chan) == + UpdatedUndef.end() && + "UpdatedUndef shouldn't contain Chan more than once!"); + DEBUG(dbgs() << " ->"; Tmp->dump();); + (void)Tmp; + SrcVec = DstReg; + } + Pos = BuildMI(MBB, Pos, DL, TII->get(AMDGPU::COPY), Reg) + .addReg(SrcVec); + DEBUG(dbgs() << " ->"; Pos->dump();); + + DEBUG(dbgs() << " Updating Swizzle:\n"); + for (MachineRegisterInfo::use_iterator It = MRI->use_begin(Reg), + E = MRI->use_end(); It != E; ++It) { + DEBUG(dbgs() << " ";(*It).dump(); dbgs() << " ->"); + SwizzleInput(*It, RemapChan); + DEBUG((*It).dump()); + } + RSI->Instr->eraseFromParent(); + + // Update RSI + RSI->Instr = Pos; + RSI->RegToChan = UpdatedRegToChan; + RSI->UndefReg = UpdatedUndef; + + return Pos; +} + +void R600VectorRegMerger::RemoveMI(MachineInstr *MI) { + for (InstructionSetMap::iterator It = PreviousRegSeqByReg.begin(), + E = PreviousRegSeqByReg.end(); It != E; ++It) { + std::vector<MachineInstr *> &MIs = (*It).second; + MIs.erase(std::find(MIs.begin(), MIs.end(), MI), MIs.end()); + } + for (InstructionSetMap::iterator It = 
PreviousRegSeqByUndefCount.begin(), + E = PreviousRegSeqByUndefCount.end(); It != E; ++It) { + std::vector<MachineInstr *> &MIs = (*It).second; + MIs.erase(std::find(MIs.begin(), MIs.end(), MI), MIs.end()); + } +} + +void R600VectorRegMerger::SwizzleInput(MachineInstr &MI, + const std::vector<std::pair<unsigned, unsigned> > &RemapChan) const { + unsigned Offset; + if (TII->get(MI.getOpcode()).TSFlags & R600_InstFlag::TEX_INST) + Offset = 2; + else + Offset = 3; + for (unsigned i = 0; i < 4; i++) { + unsigned Swizzle = MI.getOperand(i + Offset).getImm() + 1; + for (unsigned j = 0, e = RemapChan.size(); j < e; j++) { + if (RemapChan[j].first == Swizzle) { + MI.getOperand(i + Offset).setImm(RemapChan[j].second - 1); + break; + } + } + } +} + +bool R600VectorRegMerger::areAllUsesSwizzeable(unsigned Reg) const { + for (MachineRegisterInfo::use_iterator It = MRI->use_begin(Reg), + E = MRI->use_end(); It != E; ++It) { + if (!canSwizzle(*It)) + return false; + } + return true; +} + +bool R600VectorRegMerger::tryMergeUsingCommonSlot(RegSeqInfo &RSI, + RegSeqInfo &CompatibleRSI, + std::vector<std::pair<unsigned, unsigned> > &RemapChan) { + for (MachineInstr::mop_iterator MOp = RSI.Instr->operands_begin(), + MOE = RSI.Instr->operands_end(); MOp != MOE; ++MOp) { + if (!MOp->isReg()) + continue; + if (PreviousRegSeqByReg[MOp->getReg()].empty()) + continue; + std::vector<MachineInstr *> MIs = PreviousRegSeqByReg[MOp->getReg()]; + for (unsigned i = 0, e = MIs.size(); i < e; i++) { + CompatibleRSI = PreviousRegSeq[MIs[i]]; + if (RSI == CompatibleRSI) + continue; + if (tryMergeVector(&CompatibleRSI, &RSI, RemapChan)) + return true; + } + } + return false; +} + +bool R600VectorRegMerger::tryMergeUsingFreeSlot(RegSeqInfo &RSI, + RegSeqInfo &CompatibleRSI, + std::vector<std::pair<unsigned, unsigned> > &RemapChan) { + unsigned NeededUndefs = 4 - RSI.UndefReg.size(); + if (PreviousRegSeqByUndefCount[NeededUndefs].empty()) + return false; + std::vector<MachineInstr *> &MIs = + PreviousRegSeqByUndefCount[NeededUndefs]; + CompatibleRSI = PreviousRegSeq[MIs.back()]; + tryMergeVector(&CompatibleRSI, &RSI, RemapChan); + return true; +} + +void R600VectorRegMerger::trackRSI(const RegSeqInfo &RSI) { + for (DenseMap<unsigned, unsigned>::const_iterator + It = RSI.RegToChan.begin(), E = RSI.RegToChan.end(); It != E; ++It) { + PreviousRegSeqByReg[(*It).first].push_back(RSI.Instr); + } + PreviousRegSeqByUndefCount[RSI.UndefReg.size()].push_back(RSI.Instr); + PreviousRegSeq[RSI.Instr] = RSI; +} + +bool R600VectorRegMerger::runOnMachineFunction(MachineFunction &Fn) { + TII = static_cast<const R600InstrInfo *>(Fn.getTarget().getInstrInfo()); + MRI = &(Fn.getRegInfo()); + for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end(); + MBB != MBBe; ++MBB) { + MachineBasicBlock *MB = MBB; + PreviousRegSeq.clear(); + PreviousRegSeqByReg.clear(); + PreviousRegSeqByUndefCount.clear(); + + for (MachineBasicBlock::iterator MII = MB->begin(), MIIE = MB->end(); + MII != MIIE; ++MII) { + MachineInstr *MI = MII; + if (MI->getOpcode() != AMDGPU::REG_SEQUENCE) + continue; + + RegSeqInfo RSI(*MRI, MI); + + // Are all uses of MI swizzleable?
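+ // (A use counts as swizzleable when canSwizzle() accepts it: texture
+ // instructions and the R600_ExportSwz/EG_ExportSwz instructions, whose
+ // source channels can be reordered through their swizzle operands, as
+ // the SwizzleInput() rewrite above relies on.)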
+ unsigned Reg = MI->getOperand(0).getReg(); + if (!areAllUsesSwizzeable(Reg)) + continue; + + DEBUG (dbgs() << "Trying to optimize "; + MI->dump(); + ); + + RegSeqInfo CandidateRSI; + std::vector<std::pair<unsigned, unsigned> > RemapChan; + DEBUG(dbgs() << "Using common slots...\n";); + if (tryMergeUsingCommonSlot(RSI, CandidateRSI, RemapChan)) { + // Remove CandidateRSI mapping + RemoveMI(CandidateRSI.Instr); + MII = RebuildVector(&RSI, &CandidateRSI, RemapChan); + trackRSI(RSI); + continue; + } + DEBUG(dbgs() << "Using free slots...\n";); + RemapChan.clear(); + if (tryMergeUsingFreeSlot(RSI, CandidateRSI, RemapChan)) { + RemoveMI(CandidateRSI.Instr); + MII = RebuildVector(&RSI, &CandidateRSI, RemapChan); + trackRSI(RSI); + continue; + } + // Failed to merge + trackRSI(RSI); + } + } + return false; +} + +} + +llvm::FunctionPass *llvm::createR600VectorRegMerger(TargetMachine &tm) { + return new R600VectorRegMerger(tm); +} diff --git a/lib/Target/R600/R600Packetizer.cpp b/lib/Target/R600/R600Packetizer.cpp index 05e96f1..da614c7 100644 --- a/lib/Target/R600/R600Packetizer.cpp +++ b/lib/Target/R600/R600Packetizer.cpp @@ -14,22 +14,21 @@ // //===----------------------------------------------------------------------===// -#ifndef R600PACKETIZER_CPP -#define R600PACKETIZER_CPP - #define DEBUG_TYPE "packets" #include "llvm/Support/Debug.h" -#include "llvm/Support/raw_ostream.h" +#include "AMDGPU.h" +#include "R600InstrInfo.h" #include "llvm/CodeGen/DFAPacketizer.h" -#include "llvm/CodeGen/Passes.h" -#include "llvm/CodeGen/MachineFunctionPass.h" #include "llvm/CodeGen/MachineDominators.h" +#include "llvm/CodeGen/MachineFunctionPass.h" #include "llvm/CodeGen/MachineLoopInfo.h" +#include "llvm/CodeGen/Passes.h" #include "llvm/CodeGen/ScheduleDAG.h" -#include "AMDGPU.h" -#include "R600InstrInfo.h" +#include "llvm/Support/raw_ostream.h" + +using namespace llvm; -namespace llvm { +namespace { class R600Packetizer : public MachineFunctionPass { @@ -60,37 +59,59 @@ private: const R600InstrInfo *TII; const R600RegisterInfo &TRI; - enum BankSwizzle { - ALU_VEC_012 = 0, - ALU_VEC_021, - ALU_VEC_120, - ALU_VEC_102, - ALU_VEC_201, - ALU_VEC_210 - }; - unsigned getSlot(const MachineInstr *MI) const { return TRI.getHWRegChan(MI->getOperand(0).getReg()); } - std::vector<unsigned> getPreviousVector(MachineBasicBlock::iterator I) const { - std::vector<unsigned> Result; + /// \returns the register to PV chan mapping for the bundle/single instruction + /// that immediately precedes I.
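+ /// For example, if the preceding instruction group writes T0.X and T1.Z,
+ /// the returned map is { T0_X -> PV_X, T1_Z -> PV_Z }, and substitutePV()
+ /// below then rewrites reads of those registers into the matching PV
+ /// (previous vector) register of the same channel.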
+ DenseMap<unsigned, unsigned> getPreviousVector(MachineBasicBlock::iterator I) + const { + DenseMap<unsigned, unsigned> Result; I--; if (!TII->isALUInstr(I->getOpcode()) && !I->isBundle()) return Result; MachineBasicBlock::instr_iterator BI = I.getInstrIterator(); if (I->isBundle()) BI++; - while (BI->isBundledWithPred() && !TII->isPredicated(BI)) { + do { + if (TII->isPredicated(BI)) + continue; + if (TII->isTransOnly(BI)) + continue; int OperandIdx = TII->getOperandIdx(BI->getOpcode(), R600Operands::WRITE); - if (OperandIdx > -1 && BI->getOperand(OperandIdx).getImm()) - Result.push_back(BI->getOperand(0).getReg()); - BI++; - } + if (OperandIdx > -1 && BI->getOperand(OperandIdx).getImm() == 0) + continue; + unsigned Dst = BI->getOperand(0).getReg(); + if (BI->getOpcode() == AMDGPU::DOT4_r600 || + BI->getOpcode() == AMDGPU::DOT4_eg) { + Result[Dst] = AMDGPU::PV_X; + continue; + } + unsigned PVReg = 0; + switch (TRI.getHWRegChan(Dst)) { + case 0: + PVReg = AMDGPU::PV_X; + break; + case 1: + PVReg = AMDGPU::PV_Y; + break; + case 2: + PVReg = AMDGPU::PV_Z; + break; + case 3: + PVReg = AMDGPU::PV_W; + break; + default: + llvm_unreachable("Invalid Chan"); + } + Result[Dst] = PVReg; + } while ((++BI)->isBundledWithPred()); return Result; } - void substitutePV(MachineInstr *MI, const std::vector<unsigned> &PV) const { + void substitutePV(MachineInstr *MI, const DenseMap<unsigned, unsigned> &PVs) + const { R600Operands::Ops Ops[] = { R600Operands::SRC0, R600Operands::SRC1, @@ -101,30 +122,9 @@ private: if (OperandIdx < 0) continue; unsigned Src = MI->getOperand(OperandIdx).getReg(); - for (unsigned j = 0, e = PV.size(); j < e; j++) { - if (Src == PV[j]) { - unsigned Chan = TRI.getHWRegChan(Src); - unsigned PVReg; - switch (Chan) { - case 0: - PVReg = AMDGPU::PV_X; - break; - case 1: - PVReg = AMDGPU::PV_Y; - break; - case 2: - PVReg = AMDGPU::PV_Z; - break; - case 3: - PVReg = AMDGPU::PV_W; - break; - default: - llvm_unreachable("Invalid Chan"); - } - MI->getOperand(OperandIdx).setReg(PVReg); - break; - } - } + const DenseMap<unsigned, unsigned>::const_iterator It = PVs.find(Src); + if (It != PVs.end()) + MI->getOperand(OperandIdx).setReg(It->second); } } public: @@ -209,8 +209,11 @@ public: } dbgs() << "because of Consts read limitations\n"; }); - const std::vector<unsigned> &PV = getPreviousVector(MI); - bool FitsReadPortLimits = fitsReadPortLimitation(CurrentPacketMIs, PV); + const DenseMap<unsigned, unsigned> &PV = + getPreviousVector(CurrentPacketMIs.front()); + std::vector<R600InstrInfo::BankSwizzle> BS; + bool FitsReadPortLimits = + TII->fitsReadPortLimitations(CurrentPacketMIs, PV, BS); DEBUG( if (!FitsReadPortLimits) { dbgs() << "Couldn't pack :\n"; @@ -223,6 +226,14 @@ public: dbgs() << "because of Read port limitations\n"; }); bool isBundlable = FitsConstLimits && FitsReadPortLimits; + if (isBundlable) { + for (unsigned i = 0, e = CurrentPacketMIs.size(); i < e; i++) { + MachineInstr *MI = CurrentPacketMIs[i]; + unsigned Op = TII->getOperandIdx(MI->getOpcode(), + R600Operands::BANK_SWIZZLE); + MI->getOperand(Op).setImm(BS[i]); + } + } CurrentPacketMIs.pop_back(); if (!isBundlable) { endPacket(MI->getParent(), MI); @@ -234,133 +245,6 @@ public: substitutePV(MI, PV); return VLIWPacketizerList::addToPacket(MI); } -private: - std::vector<std::pair<int, unsigned> > - ExtractSrcs(const MachineInstr *MI, const std::vector<unsigned> &PV) const { - R600Operands::Ops Ops[] = { - R600Operands::SRC0, - R600Operands::SRC1, - R600Operands::SRC2 - }; - std::vector<std::pair<int, unsigned> > Result; - 
for (unsigned i = 0; i < 3; i++) { - int OperandIdx = TII->getOperandIdx(MI->getOpcode(), Ops[i]); - if (OperandIdx < 0){ - Result.push_back(std::pair<int, unsigned>(-1,0)); - continue; - } - unsigned Src = MI->getOperand(OperandIdx).getReg(); - if (std::find(PV.begin(), PV.end(), Src) != PV.end()) { - Result.push_back(std::pair<int, unsigned>(-1,0)); - continue; - } - unsigned Reg = TRI.getEncodingValue(Src) & 0xff; - if (Reg > 127) { - Result.push_back(std::pair<int, unsigned>(-1,0)); - continue; - } - unsigned Chan = TRI.getHWRegChan(Src); - Result.push_back(std::pair<int, unsigned>(Reg, Chan)); - } - return Result; - } - - std::vector<std::pair<int, unsigned> > - Swizzle(std::vector<std::pair<int, unsigned> > Src, - BankSwizzle Swz) const { - switch (Swz) { - case ALU_VEC_012: - break; - case ALU_VEC_021: - std::swap(Src[1], Src[2]); - break; - case ALU_VEC_102: - std::swap(Src[0], Src[1]); - break; - case ALU_VEC_120: - std::swap(Src[0], Src[1]); - std::swap(Src[0], Src[2]); - break; - case ALU_VEC_201: - std::swap(Src[0], Src[2]); - std::swap(Src[0], Src[1]); - break; - case ALU_VEC_210: - std::swap(Src[0], Src[2]); - break; - } - return Src; - } - - bool isLegal(const std::vector<MachineInstr *> &IG, - const std::vector<BankSwizzle> &Swz, - const std::vector<unsigned> &PV) const { - assert (Swz.size() == IG.size()); - int Vector[4][3]; - memset(Vector, -1, sizeof(Vector)); - for (unsigned i = 0, e = IG.size(); i < e; i++) { - const std::vector<std::pair<int, unsigned> > &Srcs = - Swizzle(ExtractSrcs(IG[i], PV), Swz[i]); - for (unsigned j = 0; j < 3; j++) { - const std::pair<int, unsigned> &Src = Srcs[j]; - if (Src.first < 0) - continue; - if (Vector[Src.second][j] < 0) - Vector[Src.second][j] = Src.first; - if (Vector[Src.second][j] != Src.first) - return false; - } - } - return true; - } - - bool recursiveFitsFPLimitation( - std::vector<MachineInstr *> IG, - const std::vector<unsigned> &PV, - std::vector<BankSwizzle> &SwzCandidate, - std::vector<MachineInstr *> CurrentlyChecked) - const { - if (!isLegal(CurrentlyChecked, SwzCandidate, PV)) - return false; - if (IG.size() == CurrentlyChecked.size()) { - return true; - } - BankSwizzle AvailableSwizzle[] = { - ALU_VEC_012, - ALU_VEC_021, - ALU_VEC_120, - ALU_VEC_102, - ALU_VEC_201, - ALU_VEC_210 - }; - CurrentlyChecked.push_back(IG[CurrentlyChecked.size()]); - for (unsigned i = 0; i < 6; i++) { - SwzCandidate.push_back(AvailableSwizzle[i]); - if (recursiveFitsFPLimitation(IG, PV, SwzCandidate, CurrentlyChecked)) - return true; - SwzCandidate.pop_back(); - } - return false; - } - - bool fitsReadPortLimitation( - std::vector<MachineInstr *> IG, - const std::vector<unsigned> &PV) - const { - //Todo : support shared src0 - src1 operand - std::vector<BankSwizzle> SwzCandidate; - bool Result = recursiveFitsFPLimitation(IG, PV, SwzCandidate, - std::vector<MachineInstr *>()); - if (!Result) - return false; - for (unsigned i = 0, e = IG.size(); i < e; i++) { - MachineInstr *MI = IG[i]; - unsigned Op = TII->getOperandIdx(MI->getOpcode(), - R600Operands::BANK_SWIZZLE); - MI->getOperand(Op).setImm(SwzCandidate[i]); - } - return true; - } }; bool R600Packetizer::runOnMachineFunction(MachineFunction &Fn) { @@ -437,10 +321,8 @@ bool R600Packetizer::runOnMachineFunction(MachineFunction &Fn) { } -} +} // end anonymous namespace llvm::FunctionPass *llvm::createR600Packetizer(TargetMachine &tm) { return new R600Packetizer(tm); } - -#endif // R600PACKETIZER_CPP diff --git a/lib/Target/R600/R600RegisterInfo.cpp b/lib/Target/R600/R600RegisterInfo.cpp index 
bbd7995..a42043b 100644 --- a/lib/Target/R600/R600RegisterInfo.cpp +++ b/lib/Target/R600/R600RegisterInfo.cpp @@ -20,12 +20,10 @@ using namespace llvm; -R600RegisterInfo::R600RegisterInfo(AMDGPUTargetMachine &tm, - const TargetInstrInfo &tii) -: AMDGPURegisterInfo(tm, tii), - TM(tm), - TII(tii) - { } +R600RegisterInfo::R600RegisterInfo(AMDGPUTargetMachine &tm) +: AMDGPURegisterInfo(tm), + TM(tm) + { RCW.RegWeight = 0; RCW.WeightLimit = 0;} BitVector R600RegisterInfo::getReservedRegs(const MachineFunction &MF) const { BitVector Reserved(getNumRegs()); @@ -55,7 +53,8 @@ BitVector R600RegisterInfo::getReservedRegs(const MachineFunction &MF) const { Reserved.set(*I); } - const R600InstrInfo *RII = static_cast<const R600InstrInfo*>(&TII); + const R600InstrInfo *RII = + static_cast<const R600InstrInfo*>(TM.getInstrInfo()); std::vector<unsigned> IndirectRegs = RII->getIndirectReservedRegs(MF); for (std::vector<unsigned>::iterator I = IndirectRegs.begin(), E = IndirectRegs.end(); @@ -97,3 +96,7 @@ unsigned R600RegisterInfo::getSubRegFromChannel(unsigned Channel) const { } } +const RegClassWeight &R600RegisterInfo::getRegClassWeight( + const TargetRegisterClass *RC) const { + return RCW; +} diff --git a/lib/Target/R600/R600RegisterInfo.h b/lib/Target/R600/R600RegisterInfo.h index f9ca918..9b286ee 100644 --- a/lib/Target/R600/R600RegisterInfo.h +++ b/lib/Target/R600/R600RegisterInfo.h @@ -21,13 +21,12 @@ namespace llvm { class R600TargetMachine; -class TargetInstrInfo; struct R600RegisterInfo : public AMDGPURegisterInfo { AMDGPUTargetMachine &TM; - const TargetInstrInfo &TII; + RegClassWeight RCW; - R600RegisterInfo(AMDGPUTargetMachine &tm, const TargetInstrInfo &tii); + R600RegisterInfo(AMDGPUTargetMachine &tm); virtual BitVector getReservedRegs(const MachineFunction &MF) const; @@ -48,6 +47,8 @@ struct R600RegisterInfo : public AMDGPURegisterInfo { /// (e.g. 
getSubRegFromChannel(0) -> AMDGPU::sel_x) unsigned getSubRegFromChannel(unsigned Channel) const; + virtual const RegClassWeight &getRegClassWeight(const TargetRegisterClass *RC) const; + }; } // End namespace llvm diff --git a/lib/Target/R600/R600RegisterInfo.td b/lib/Target/R600/R600RegisterInfo.td index 5a2e65c..a8b9b70 100644 --- a/lib/Target/R600/R600RegisterInfo.td +++ b/lib/Target/R600/R600RegisterInfo.td @@ -35,7 +35,7 @@ foreach Index = 0-127 in { Chan>; } // 128-bit Temporary Registers - def T#Index#_XYZW : R600Reg_128 <"T"#Index#".XYZW", + def T#Index#_XYZW : R600Reg_128 <"T"#Index#"", [!cast<Register>("T"#Index#"_X"), !cast<Register>("T"#Index#"_Y"), !cast<Register>("T"#Index#"_Z"), @@ -89,13 +89,13 @@ def ONE_INT : R600Reg<"1", 250>; def HALF : R600Reg<"0.5", 252>; def NEG_HALF : R600Reg<"-0.5", 252>; def ALU_LITERAL_X : R600RegWithChan<"literal.x", 253, "X">; -def ALU_LITERAL_Y : R600RegWithChan<"literal.x", 253, "Y">; -def ALU_LITERAL_Z : R600RegWithChan<"literal.x", 253, "Z">; -def ALU_LITERAL_W : R600RegWithChan<"literal.x", 253, "W">; -def PV_X : R600RegWithChan<"PV.x", 254, "X">; -def PV_Y : R600RegWithChan<"PV.y", 254, "Y">; -def PV_Z : R600RegWithChan<"PV.z", 254, "Z">; -def PV_W : R600RegWithChan<"PV.w", 254, "W">; +def ALU_LITERAL_Y : R600RegWithChan<"literal.y", 253, "Y">; +def ALU_LITERAL_Z : R600RegWithChan<"literal.z", 253, "Z">; +def ALU_LITERAL_W : R600RegWithChan<"literal.w", 253, "W">; +def PV_X : R600RegWithChan<"PV.X", 254, "X">; +def PV_Y : R600RegWithChan<"PV.Y", 254, "Y">; +def PV_Z : R600RegWithChan<"PV.Z", 254, "Z">; +def PV_W : R600RegWithChan<"PV.W", 254, "W">; def PREDICATE_BIT : R600Reg<"PredicateBit", 0>; def PRED_SEL_OFF: R600Reg<"Pred_sel_off", 0>; def PRED_SEL_ZERO : R600Reg<"Pred_sel_zero", 2>; diff --git a/lib/Target/R600/R600TextureIntrinsicsReplacer.cpp b/lib/Target/R600/R600TextureIntrinsicsReplacer.cpp new file mode 100644 index 0000000..3768ba0 --- /dev/null +++ b/lib/Target/R600/R600TextureIntrinsicsReplacer.cpp @@ -0,0 +1,301 @@ +//===-- R600TextureIntrinsicsReplacer.cpp ---------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +/// \file +/// This pass translates TGSI-like texture intrinsics into R600 texture +/// intrinsics that are closer to the hardware.
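The new pass is built on the FunctionPass/InstVisitor combination: runOnFunction simply calls visit(F), and InstVisitor dispatches every call instruction to visitCallInst, where the intrinsics are matched by name and rewritten in place. A minimal sketch of that shape, assuming the LLVM 3.3-era headers this patch uses (the class name here is hypothetical, not part of the patch):

#include "llvm/Pass.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/InstVisitor.h"

using namespace llvm;

namespace {
class IntrinsicRewriter : public FunctionPass,
                          public InstVisitor<IntrinsicRewriter> {
  static char ID;
public:
  IntrinsicRewriter() : FunctionPass(ID) {}
  // visit(F) walks every instruction; InstVisitor routes each CallInst
  // to visitCallInst below.
  virtual bool runOnFunction(Function &F) { visit(F); return true; }
  void visitCallInst(CallInst &I) {
    // Match I.getCalledFunction()->getName() here and emit the
    // replacement call, as the real pass does for llvm.AMDGPU.tex
    // and friends.
  }
};
char IntrinsicRewriter::ID = 0;
} // end anonymous namespace

The design choice worth noting is that all rewriting happens at the IR level, before instruction selection, so the rest of the backend only ever sees the hardware-shaped llvm.R600.* intrinsics.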
+//===----------------------------------------------------------------------===// + +#include "AMDGPU.h" +#include "llvm/ADT/Statistic.h" +#include "llvm/Analysis/Passes.h" +#include "llvm/IR/Function.h" +#include "llvm/IR/GlobalValue.h" +#include "llvm/IR/IRBuilder.h" +#include "llvm/InstVisitor.h" + +using namespace llvm; + +namespace { +class R600TextureIntrinsicsReplacer : + public FunctionPass, public InstVisitor<R600TextureIntrinsicsReplacer> { + static char ID; + + Module *Mod; + Type *FloatType; + Type *Int32Type; + Type *V4f32Type; + Type *V4i32Type; + FunctionType *TexSign; + FunctionType *TexQSign; + + void getAdjustementFromTextureTarget(unsigned TextureType, bool hasLOD, + unsigned SrcSelect[4], unsigned CT[4], + bool &useShadowVariant) { + enum TextureTypes { + TEXTURE_1D = 1, + TEXTURE_2D, + TEXTURE_3D, + TEXTURE_CUBE, + TEXTURE_RECT, + TEXTURE_SHADOW1D, + TEXTURE_SHADOW2D, + TEXTURE_SHADOWRECT, + TEXTURE_1D_ARRAY, + TEXTURE_2D_ARRAY, + TEXTURE_SHADOW1D_ARRAY, + TEXTURE_SHADOW2D_ARRAY, + TEXTURE_SHADOWCUBE, + TEXTURE_2D_MSAA, + TEXTURE_2D_ARRAY_MSAA, + TEXTURE_CUBE_ARRAY, + TEXTURE_SHADOWCUBE_ARRAY + }; + + switch (TextureType) { + case 0: + return; + case TEXTURE_RECT: + case TEXTURE_1D: + case TEXTURE_2D: + case TEXTURE_3D: + case TEXTURE_CUBE: + case TEXTURE_1D_ARRAY: + case TEXTURE_2D_ARRAY: + case TEXTURE_CUBE_ARRAY: + case TEXTURE_2D_MSAA: + case TEXTURE_2D_ARRAY_MSAA: + useShadowVariant = false; + break; + case TEXTURE_SHADOW1D: + case TEXTURE_SHADOW2D: + case TEXTURE_SHADOWRECT: + case TEXTURE_SHADOW1D_ARRAY: + case TEXTURE_SHADOW2D_ARRAY: + case TEXTURE_SHADOWCUBE: + case TEXTURE_SHADOWCUBE_ARRAY: + useShadowVariant = true; + break; + default: + llvm_unreachable("Unknown Texture Type"); + } + + if (TextureType == TEXTURE_RECT || + TextureType == TEXTURE_SHADOWRECT) { + CT[0] = 0; + CT[1] = 0; + } + + if (TextureType == TEXTURE_CUBE_ARRAY || + TextureType == TEXTURE_SHADOWCUBE_ARRAY) { + CT[2] = 0; + } + + if (TextureType == TEXTURE_1D_ARRAY || + TextureType == TEXTURE_SHADOW1D_ARRAY) { + if (hasLOD && useShadowVariant) { + CT[1] = 0; + } else { + CT[2] = 0; + SrcSelect[2] = 1; + } + } else if (TextureType == TEXTURE_2D_ARRAY || + TextureType == TEXTURE_SHADOW2D_ARRAY) { + CT[2] = 0; + } + + if ((TextureType == TEXTURE_SHADOW1D || + TextureType == TEXTURE_SHADOW2D || + TextureType == TEXTURE_SHADOWRECT || + TextureType == TEXTURE_SHADOW1D_ARRAY) && + !(hasLOD && useShadowVariant)) { + SrcSelect[3] = 2; + } + } + + void ReplaceCallInst(CallInst &I, FunctionType *FT, const char *Name, + unsigned SrcSelect[4], Value *Offset[3], Value *Resource, + Value *Sampler, unsigned CT[4], Value *Coord) { + IRBuilder<> Builder(&I); + Constant *Mask[] = { + ConstantInt::get(Int32Type, SrcSelect[0]), + ConstantInt::get(Int32Type, SrcSelect[1]), + ConstantInt::get(Int32Type, SrcSelect[2]), + ConstantInt::get(Int32Type, SrcSelect[3]) + }; + Value *SwizzleMask = ConstantVector::get(Mask); + Value *SwizzledCoord = + Builder.CreateShuffleVector(Coord, Coord, SwizzleMask); + + Value *Args[] = { + SwizzledCoord, + Offset[0], + Offset[1], + Offset[2], + Resource, + Sampler, + ConstantInt::get(Int32Type, CT[0]), + ConstantInt::get(Int32Type, CT[1]), + ConstantInt::get(Int32Type, CT[2]), + ConstantInt::get(Int32Type, CT[3]) + }; + + Function *F = Mod->getFunction(Name); + if (!F) { + F = Function::Create(FT, GlobalValue::ExternalLinkage, Name, Mod); + F->addFnAttr(Attribute::ReadNone); + } + I.replaceAllUsesWith(Builder.CreateCall(F, Args)); + I.eraseFromParent(); + } + + void 
ReplaceTexIntrinsic(CallInst &I, bool hasLOD, FunctionType *FT, + const char *VanillaInt, + const char *ShadowInt) { + Value *Coord = I.getArgOperand(0); + Value *ResourceId = I.getArgOperand(1); + Value *SamplerId = I.getArgOperand(2); + + unsigned TextureType = + dyn_cast<ConstantInt>(I.getArgOperand(3))->getZExtValue(); + + unsigned SrcSelect[4] = { 0, 1, 2, 3 }; + unsigned CT[4] = {1, 1, 1, 1}; + Value *Offset[3] = { + ConstantInt::get(Int32Type, 0), + ConstantInt::get(Int32Type, 0), + ConstantInt::get(Int32Type, 0) + }; + bool useShadowVariant; + + getAdjustementFromTextureTarget(TextureType, hasLOD, SrcSelect, CT, + useShadowVariant); + + ReplaceCallInst(I, FT, useShadowVariant?ShadowInt:VanillaInt, SrcSelect, + Offset, ResourceId, SamplerId, CT, Coord); + } + + void ReplaceTXF(CallInst &I) { + Value *Coord = I.getArgOperand(0); + Value *ResourceId = I.getArgOperand(4); + Value *SamplerId = I.getArgOperand(5); + + unsigned TextureType = + dyn_cast<ConstantInt>(I.getArgOperand(6))->getZExtValue(); + + unsigned SrcSelect[4] = { 0, 1, 2, 3 }; + unsigned CT[4] = {1, 1, 1, 1}; + Value *Offset[3] = { + I.getArgOperand(1), + I.getArgOperand(2), + I.getArgOperand(3), + }; + bool useShadowVariant; + + getAdjustementFromTextureTarget(TextureType, false, SrcSelect, CT, + useShadowVariant); + + ReplaceCallInst(I, TexQSign, "llvm.R600.txf", SrcSelect, + Offset, ResourceId, SamplerId, CT, Coord); + } + +public: + R600TextureIntrinsicsReplacer(): + FunctionPass(ID) { + } + + virtual bool doInitialization(Module &M) { + LLVMContext &Ctx = M.getContext(); + Mod = &M; + FloatType = Type::getFloatTy(Ctx); + Int32Type = Type::getInt32Ty(Ctx); + V4f32Type = VectorType::get(FloatType, 4); + V4i32Type = VectorType::get(Int32Type, 4); + Type *ArgsType[] = { + V4f32Type, + Int32Type, + Int32Type, + Int32Type, + Int32Type, + Int32Type, + Int32Type, + Int32Type, + Int32Type, + Int32Type, + }; + TexSign = FunctionType::get(V4f32Type, ArgsType, /*isVarArg=*/false); + Type *ArgsQType[] = { + V4i32Type, + Int32Type, + Int32Type, + Int32Type, + Int32Type, + Int32Type, + Int32Type, + Int32Type, + Int32Type, + Int32Type, + }; + TexQSign = FunctionType::get(V4f32Type, ArgsQType, /*isVarArg=*/false); + return false; + } + + virtual bool runOnFunction(Function &F) { + visit(F); + return false; + } + + virtual const char *getPassName() const { + return "R600 Texture Intrinsics Replacer"; + } + + void getAnalysisUsage(AnalysisUsage &AU) const { + } + + void visitCallInst(CallInst &I) { + StringRef Name = I.getCalledFunction()->getName(); + if (Name == "llvm.AMDGPU.tex") { + ReplaceTexIntrinsic(I, false, TexSign, "llvm.R600.tex", "llvm.R600.texc"); + return; + } + if (Name == "llvm.AMDGPU.txl") { + ReplaceTexIntrinsic(I, true, TexSign, "llvm.R600.txl", "llvm.R600.txlc"); + return; + } + if (Name == "llvm.AMDGPU.txb") { + ReplaceTexIntrinsic(I, true, TexSign, "llvm.R600.txb", "llvm.R600.txbc"); + return; + } + if (Name == "llvm.AMDGPU.txf") { + ReplaceTXF(I); + return; + } + if (Name == "llvm.AMDGPU.txq") { + ReplaceTexIntrinsic(I, false, TexQSign, "llvm.R600.txq", "llvm.R600.txq"); + return; + } + if (Name == "llvm.AMDGPU.ddx") { + ReplaceTexIntrinsic(I, false, TexSign, "llvm.R600.ddx", "llvm.R600.ddx"); + return; + } + if (Name == "llvm.AMDGPU.ddy") { + ReplaceTexIntrinsic(I, false, TexSign, "llvm.R600.ddy", "llvm.R600.ddy"); + return; + } + } + +}; + +char R600TextureIntrinsicsReplacer::ID = 0; + +} + +FunctionPass *llvm::createR600TextureIntrinsicsReplacer() { + return new R600TextureIntrinsicsReplacer(); +} diff --git 
a/lib/Target/R600/SIAnnotateControlFlow.cpp b/lib/Target/R600/SIAnnotateControlFlow.cpp index 2477e2a..9791ef4 100644 --- a/lib/Target/R600/SIAnnotateControlFlow.cpp +++ b/lib/Target/R600/SIAnnotateControlFlow.cpp @@ -15,6 +15,8 @@ #include "AMDGPU.h" #include "llvm/ADT/DepthFirstIterator.h" #include "llvm/Analysis/Dominators.h" +#include "llvm/IR/Constants.h" +#include "llvm/IR/Instructions.h" #include "llvm/IR/Module.h" #include "llvm/Pass.h" #include "llvm/Transforms/Utils/BasicBlockUtils.h" diff --git a/lib/Target/R600/SIISelLowering.cpp b/lib/Target/R600/SIISelLowering.cpp index 1a07aff..d74f401 100644 --- a/lib/Target/R600/SIISelLowering.cpp +++ b/lib/Target/R600/SIISelLowering.cpp @@ -13,24 +13,23 @@ //===----------------------------------------------------------------------===// #include "SIISelLowering.h" -#include "AMDIL.h" #include "AMDGPU.h" #include "AMDILIntrinsicInfo.h" #include "SIInstrInfo.h" #include "SIMachineFunctionInfo.h" #include "SIRegisterInfo.h" -#include "llvm/IR/Function.h" #include "llvm/CodeGen/CallingConvLower.h" #include "llvm/CodeGen/MachineInstrBuilder.h" #include "llvm/CodeGen/MachineRegisterInfo.h" #include "llvm/CodeGen/SelectionDAG.h" +#include "llvm/IR/Function.h" + +const uint64_t RSRC_DATA_FORMAT = 0xf00000000000LL; using namespace llvm; SITargetLowering::SITargetLowering(TargetMachine &TM) : - AMDGPUTargetLowering(TM), - TII(static_cast<const SIInstrInfo*>(TM.getInstrInfo())), - TRI(TM.getRegisterInfo()) { + AMDGPUTargetLowering(TM) { addRegisterClass(MVT::i1, &AMDGPU::SReg_64RegClass); addRegisterClass(MVT::i64, &AMDGPU::SReg_64RegClass); @@ -72,8 +71,9 @@ SITargetLowering::SITargetLowering(TargetMachine &TM) : setOperationAction(ISD::SELECT_CC, MVT::Other, Expand); - setOperationAction(ISD::STORE, MVT::i32, Custom); - setOperationAction(ISD::STORE, MVT::i64, Custom); + setOperationAction(ISD::SIGN_EXTEND, MVT::i64, Custom); + + setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom); setTargetDAGCombine(ISD::SELECT_CC); @@ -82,12 +82,29 @@ SITargetLowering::SITargetLowering(TargetMachine &TM) : setSchedulingPreference(Sched::RegPressure); } +SDValue SITargetLowering::LowerParameter(SelectionDAG &DAG, EVT VT, + SDLoc DL, SDValue Chain, + unsigned Offset) const { + MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo(); + PointerType *PtrTy = PointerType::get(VT.getTypeForEVT(*DAG.getContext()), + AMDGPUAS::CONSTANT_ADDRESS); + EVT ArgVT = MVT::getIntegerVT(VT.getSizeInBits()); + SDValue BasePtr = DAG.getCopyFromReg(Chain, DL, + MRI.getLiveInVirtReg(AMDGPU::SGPR0_SGPR1), MVT::i64); + SDValue Ptr = DAG.getNode(ISD::ADD, DL, MVT::i64, BasePtr, + DAG.getConstant(Offset, MVT::i64)); + return DAG.getExtLoad(ISD::ZEXTLOAD, DL, VT, Chain, Ptr, + MachinePointerInfo(UndefValue::get(PtrTy)), + VT, false, false, ArgVT.getSizeInBits() >> 3); + +} + SDValue SITargetLowering::LowerFormalArguments( SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc DL, SelectionDAG &DAG, + SDLoc DL, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo(); @@ -103,8 +120,8 @@ SDValue SITargetLowering::LowerFormalArguments( for (unsigned i = 0, e = Ins.size(), PSInputNum = 0; i != e; ++i) { const ISD::InputArg &Arg = Ins[i]; - - // First check if it's a PS input addr + + // First check if it's a PS input addr if (Info->ShaderType == ShaderType::PIXEL && !Arg.Flags.isInReg()) { assert((PSInputNum <= 15) && "Too many PS inputs!"); @@ 
-120,7 +137,7 @@ SDValue SITargetLowering::LowerFormalArguments( } // Second split vertices into their elements - if (Arg.VT.isVector()) { + if (Info->ShaderType != ShaderType::COMPUTE && Arg.VT.isVector()) { ISD::InputArg NewArg = Arg; NewArg.Flags.setSplit(); NewArg.VT = Arg.VT.getVectorElementType(); @@ -152,20 +169,37 @@ SDValue SITargetLowering::LowerFormalArguments( CCInfo.AllocateReg(AMDGPU::VGPR1); } + // The pointer to the list of arguments is stored in SGPR0, SGPR1 + if (Info->ShaderType == ShaderType::COMPUTE) { + CCInfo.AllocateReg(AMDGPU::SGPR0); + CCInfo.AllocateReg(AMDGPU::SGPR1); + MF.addLiveIn(AMDGPU::SGPR0_SGPR1, &AMDGPU::SReg_64RegClass); + } + AnalyzeFormalArguments(CCInfo, Splits); for (unsigned i = 0, e = Ins.size(), ArgIdx = 0; i != e; ++i) { + const ISD::InputArg &Arg = Ins[i]; if (Skipped & (1 << i)) { - InVals.push_back(SDValue()); + InVals.push_back(DAG.getUNDEF(Arg.VT)); continue; } CCValAssign &VA = ArgLocs[ArgIdx++]; + EVT VT = VA.getLocVT(); + + if (VA.isMemLoc()) { + // The first 36 bytes of the input buffer contain information about + // thread group and global sizes. + SDValue Arg = LowerParameter(DAG, VT, DL, DAG.getRoot(), + 36 + VA.getLocMemOffset()); + InVals.push_back(Arg); + continue; + } assert(VA.isRegLoc() && "Parameter must be in a register!"); unsigned Reg = VA.getLocReg(); - MVT VT = VA.getLocVT(); if (VT == MVT::i64) { // For now assume it is a pointer @@ -181,7 +215,6 @@ SDValue SITargetLowering::LowerFormalArguments( Reg = MF.addLiveIn(Reg, RC); SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, VT); - const ISD::InputArg &Arg = Ins[i]; if (Arg.VT.isVector()) { // Build a vector from the registers @@ -200,7 +233,7 @@ SDValue SITargetLowering::LowerFormalArguments( NumElements = Arg.VT.getVectorNumElements() - NumElements; for (unsigned j = 0; j != NumElements; ++j) Regs.push_back(DAG.getUNDEF(VT)); - + InVals.push_back(DAG.getNode(ISD::BUILD_VECTOR, DL, Arg.VT, Regs.data(), Regs.size())); continue; @@ -214,15 +247,45 @@ SDValue SITargetLowering::LowerFormalArguments( MachineBasicBlock * SITargetLowering::EmitInstrWithCustomInserter( MachineInstr * MI, MachineBasicBlock * BB) const { + MachineBasicBlock::iterator I = *MI; + switch (MI->getOpcode()) { default: return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB); case AMDGPU::BRANCH: return BB; + case AMDGPU::SI_ADDR64_RSRC: { + const SIInstrInfo *TII = + static_cast<const SIInstrInfo*>(getTargetMachine().getInstrInfo()); + MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); + unsigned SuperReg = MI->getOperand(0).getReg(); + unsigned SubRegLo = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); + unsigned SubRegHi = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); + unsigned SubRegHiHi = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); + unsigned SubRegHiLo = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); + BuildMI(*BB, I, MI->getDebugLoc(), TII->get(AMDGPU::S_MOV_B64), SubRegLo) + .addOperand(MI->getOperand(1)); + BuildMI(*BB, I, MI->getDebugLoc(), TII->get(AMDGPU::S_MOV_B32), SubRegHiLo) + .addImm(0); + BuildMI(*BB, I, MI->getDebugLoc(), TII->get(AMDGPU::S_MOV_B32), SubRegHiHi) + .addImm(RSRC_DATA_FORMAT >> 32); + BuildMI(*BB, I, MI->getDebugLoc(), TII->get(AMDGPU::REG_SEQUENCE), SubRegHi) + .addReg(SubRegHiLo) + .addImm(AMDGPU::sub0) + .addReg(SubRegHiHi) + .addImm(AMDGPU::sub1); + BuildMI(*BB, I, MI->getDebugLoc(), TII->get(AMDGPU::REG_SEQUENCE), SuperReg) + .addReg(SubRegLo) + .addImm(AMDGPU::sub0_sub1) + .addReg(SubRegHi) + .addImm(AMDGPU::sub2_sub3); + 
MI->eraseFromParent(); + break; + } } return BB; } -EVT SITargetLowering::getSetCCResultType(EVT VT) const { +EVT SITargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const { return MVT::i1; } @@ -239,7 +302,55 @@ SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { default: return AMDGPUTargetLowering::LowerOperation(Op, DAG); case ISD::BRCOND: return LowerBRCOND(Op, DAG); case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); - case ISD::STORE: return LowerSTORE(Op, DAG); + case ISD::SIGN_EXTEND: return LowerSIGN_EXTEND(Op, DAG); + case ISD::INTRINSIC_WO_CHAIN: { + unsigned IntrinsicID = + cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); + EVT VT = Op.getValueType(); + SDLoc DL(Op); + // XXX: Hardcoded for now; we only use two user SGPRs, which hold the + // pointer to the parameters. + unsigned NumUserSGPRs = 2; + switch (IntrinsicID) { + default: return AMDGPUTargetLowering::LowerOperation(Op, DAG); + case Intrinsic::r600_read_ngroups_x: + return LowerParameter(DAG, VT, DL, DAG.getEntryNode(), 0); + case Intrinsic::r600_read_ngroups_y: + return LowerParameter(DAG, VT, DL, DAG.getEntryNode(), 4); + case Intrinsic::r600_read_ngroups_z: + return LowerParameter(DAG, VT, DL, DAG.getEntryNode(), 8); + case Intrinsic::r600_read_global_size_x: + return LowerParameter(DAG, VT, DL, DAG.getEntryNode(), 12); + case Intrinsic::r600_read_global_size_y: + return LowerParameter(DAG, VT, DL, DAG.getEntryNode(), 16); + case Intrinsic::r600_read_global_size_z: + return LowerParameter(DAG, VT, DL, DAG.getEntryNode(), 20); + case Intrinsic::r600_read_local_size_x: + return LowerParameter(DAG, VT, DL, DAG.getEntryNode(), 24); + case Intrinsic::r600_read_local_size_y: + return LowerParameter(DAG, VT, DL, DAG.getEntryNode(), 28); + case Intrinsic::r600_read_local_size_z: + return LowerParameter(DAG, VT, DL, DAG.getEntryNode(), 32); + case Intrinsic::r600_read_tgid_x: + return CreateLiveInRegister(DAG, &AMDGPU::SReg_32RegClass, + AMDGPU::SReg_32RegClass.getRegister(NumUserSGPRs + 0), VT); + case Intrinsic::r600_read_tgid_y: + return CreateLiveInRegister(DAG, &AMDGPU::SReg_32RegClass, + AMDGPU::SReg_32RegClass.getRegister(NumUserSGPRs + 1), VT); + case Intrinsic::r600_read_tgid_z: + return CreateLiveInRegister(DAG, &AMDGPU::SReg_32RegClass, + AMDGPU::SReg_32RegClass.getRegister(NumUserSGPRs + 2), VT); + case Intrinsic::r600_read_tidig_x: + return CreateLiveInRegister(DAG, &AMDGPU::VReg_32RegClass, + AMDGPU::VGPR0, VT); + case Intrinsic::r600_read_tidig_y: + return CreateLiveInRegister(DAG, &AMDGPU::VReg_32RegClass, + AMDGPU::VGPR1, VT); + case Intrinsic::r600_read_tidig_z: + return CreateLiveInRegister(DAG, &AMDGPU::VReg_32RegClass, + AMDGPU::VGPR2, VT); + + } + } } return SDValue(); } @@ -265,7 +376,7 @@ static SDNode *findUser(SDValue Value, unsigned Opcode) { SDValue SITargetLowering::LowerBRCOND(SDValue BRCOND, SelectionDAG &DAG) const { - DebugLoc DL = BRCOND.getDebugLoc(); + SDLoc DL(BRCOND); SDNode *Intr = BRCOND.getOperand(1).getNode(); SDValue Target = BRCOND.getOperand(2); @@ -338,32 +449,6 @@ SDValue SITargetLowering::LowerBRCOND(SDValue BRCOND, return Chain; } -#define RSRC_DATA_FORMAT 0xf00000000000 - -SDValue SITargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const { - StoreSDNode *StoreNode = cast<StoreSDNode>(Op); - SDValue Chain = Op.getOperand(0); - SDValue Value = Op.getOperand(1); - SDValue VirtualAddress = Op.getOperand(2); - DebugLoc DL = Op.getDebugLoc(); - - if (StoreNode->getAddressSpace() != AMDGPUAS::GLOBAL_ADDRESS) { - return SDValue(); - } - - SDValue SrcSrc = 
DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i128, - DAG.getConstant(0, MVT::i64), - DAG.getConstant(RSRC_DATA_FORMAT, MVT::i64)); - - SDValue Ops[2]; - Ops[0] = DAG.getNode(AMDGPUISD::BUFFER_STORE, DL, MVT::Other, Chain, - Value, SrcSrc, VirtualAddress); - Ops[1] = Chain; - - return DAG.getMergeValues(Ops, 2, DL); - -} - SDValue SITargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { SDValue LHS = Op.getOperand(0); SDValue RHS = Op.getOperand(1); @@ -371,7 +456,7 @@ SDValue SITargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { SDValue False = Op.getOperand(3); SDValue CC = Op.getOperand(4); EVT VT = Op.getValueType(); - DebugLoc DL = Op.getDebugLoc(); + SDLoc DL(Op); // Possible Min/Max pattern SDValue MinMax = LowerMinMax(Op, DAG); @@ -383,6 +468,21 @@ SDValue SITargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { return DAG.getNode(ISD::SELECT, DL, VT, Cond, True, False); } +SDValue SITargetLowering::LowerSIGN_EXTEND(SDValue Op, + SelectionDAG &DAG) const { + EVT VT = Op.getValueType(); + SDLoc DL(Op); + + if (VT != MVT::i64) { + return SDValue(); + } + + SDValue Hi = DAG.getNode(ISD::SRA, DL, MVT::i32, Op.getOperand(0), + DAG.getConstant(31, MVT::i32)); + + return DAG.getNode(ISD::BUILD_PAIR, DL, VT, Op.getOperand(0), Hi); +} + //===----------------------------------------------------------------------===// // Custom DAG optimizations //===----------------------------------------------------------------------===// @@ -390,7 +490,7 @@ SDValue SITargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { SDValue SITargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const { SelectionDAG &DAG = DCI.DAG; - DebugLoc DL = N->getDebugLoc(); + SDLoc DL(N); EVT VT = N->getValueType(0); switch (N->getOpcode()) { @@ -433,13 +533,13 @@ SDValue SITargetLowering::PerformDAGCombine(SDNode *N, return SDValue(); } -/// \brief Test if RegClass is one of the VSrc classes +/// \brief Test if RegClass is one of the VSrc classes static bool isVSrc(unsigned RegClass) { return AMDGPU::VSrc_32RegClassID == RegClass || AMDGPU::VSrc_64RegClassID == RegClass; } -/// \brief Test if RegClass is one of the SSrc classes +/// \brief Test if RegClass is one of the SSrc classes static bool isSSrc(unsigned RegClass) { return AMDGPU::SSrc_32RegClassID == RegClass || AMDGPU::SSrc_64RegClassID == RegClass; @@ -481,6 +581,8 @@ bool SITargetLowering::foldImm(SDValue &Operand, int32_t &Immediate, bool &ScalarSlotUsed) const { MachineSDNode *Mov = dyn_cast<MachineSDNode>(Operand); + const SIInstrInfo *TII = + static_cast<const SIInstrInfo*>(getTargetMachine().getInstrInfo()); if (Mov == 0 || !TII->isMov(Mov->getMachineOpcode())) return false; @@ -513,20 +615,33 @@ bool SITargetLowering::foldImm(SDValue &Operand, int32_t &Immediate, } /// \brief Does "Op" fit into register class "RegClass" ? 
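The LowerSIGN_EXTEND hook added above expands a 64-bit sign extension into two 32-bit words: the high word is the low word shifted right arithmetically by 31, so it is all zeros or all ones depending on the sign bit, and BUILD_PAIR glues the two words together. The same identity written out in plain C++ as a quick sanity check (a standalone sketch, not target code; note that >> on a negative int is implementation-defined before C++20 but arithmetic on mainstream compilers):

#include <cassert>
#include <cstdint>

// Mirror of the DAG expansion: hi = sra(lo, 31), result = build_pair(lo, hi).
int64_t signExtendViaPair(int32_t lo) {
  int32_t hi = lo >> 31; // arithmetic shift replicates the sign bit: 0 or -1
  uint64_t pair = (static_cast<uint64_t>(static_cast<uint32_t>(hi)) << 32) |
                  static_cast<uint32_t>(lo);
  return static_cast<int64_t>(pair);
}

int main() {
  assert(signExtendViaPair(-5) == -5);
  assert(signExtendViaPair(7) == 7);
  return 0;
}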
-bool SITargetLowering::fitsRegClass(SelectionDAG &DAG, SDValue &Op, +bool SITargetLowering::fitsRegClass(SelectionDAG &DAG, const SDValue &Op, unsigned RegClass) const { - MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo(); + MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo(); SDNode *Node = Op.getNode(); const TargetRegisterClass *OpClass; + const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo(); if (MachineSDNode *MN = dyn_cast<MachineSDNode>(Node)) { + const SIInstrInfo *TII = + static_cast<const SIInstrInfo*>(getTargetMachine().getInstrInfo()); const MCInstrDesc &Desc = TII->get(MN->getMachineOpcode()); int OpClassID = Desc.OpInfo[Op.getResNo()].RegClass; - if (OpClassID == -1) - OpClass = getRegClassFor(Op.getSimpleValueType()); - else + if (OpClassID == -1) { + switch (MN->getMachineOpcode()) { + case AMDGPU::REG_SEQUENCE: + // Operand 0 is the register class id for REG_SEQUENCE instructions. + OpClass = TRI->getRegClass( + cast<ConstantSDNode>(MN->getOperand(0))->getZExtValue()); + break; + default: + OpClass = getRegClassFor(Op.getSimpleValueType()); + break; + } + } else { OpClass = TRI->getRegClass(OpClassID); + } } else if (Node->getOpcode() == ISD::CopyFromReg) { RegisterSDNode *Reg = cast<RegisterSDNode>(Node->getOperand(1).getNode()); @@ -564,17 +679,30 @@ void SITargetLowering::ensureSRegLimit(SelectionDAG &DAG, SDValue &Operand, // This is a conservative approach; it is possible that we can't determine // the correct register class and copy too often, but better safe than sorry. SDValue RC = DAG.getTargetConstant(RegClass, MVT::i32); - SDNode *Node = DAG.getMachineNode(TargetOpcode::COPY_TO_REGCLASS, DebugLoc(), + SDNode *Node = DAG.getMachineNode(TargetOpcode::COPY_TO_REGCLASS, SDLoc(), Operand.getValueType(), Operand, RC); Operand = SDValue(Node, 0); } +/// \returns true if \p Node's operands are different from the SDValue list +/// \p Ops +static bool isNodeChanged(const SDNode *Node, const std::vector<SDValue> &Ops) { + for (unsigned i = 0, e = Node->getNumOperands(); i < e; ++i) { + if (Ops[i].getNode() != Node->getOperand(i).getNode()) { + return true; + } + } + return false; +} + /// \brief Try to fold the Node's operands into the Node SDNode *SITargetLowering::foldOperands(MachineSDNode *Node, SelectionDAG &DAG) const { // Original encoding (either e32 or e64) int Opcode = Node->getMachineOpcode(); + const SIInstrInfo *TII = + static_cast<const SIInstrInfo*>(getTargetMachine().getInstrInfo()); const MCInstrDesc *Desc = &TII->get(Opcode); unsigned NumDefs = Desc->getNumDefs(); @@ -700,13 +828,19 @@ SDNode *SITargetLowering::foldOperands(MachineSDNode *Node, for (unsigned i = NumOps - NumDefs, e = Node->getNumOperands(); i < e; ++i) Ops.push_back(Node->getOperand(i)); + // Nodes that have a glue result are not CSE'd by getMachineNode(), so in + // this case a brand new node is always created, even if the operands + // are the same as before. So, manually check if anything has been changed. 
+ if (Desc->Opcode == Opcode && !isNodeChanged(Node, Ops)) { + return Node; + } + // Create a complete new instruction - return DAG.getMachineNode(Desc->Opcode, Node->getDebugLoc(), - Node->getVTList(), Ops); + return DAG.getMachineNode(Desc->Opcode, SDLoc(Node), Node->getVTList(), Ops); } /// \brief Helper function for adjustWritemask -unsigned SubIdx2Lane(unsigned Idx) { +static unsigned SubIdx2Lane(unsigned Idx) { switch (Idx) { default: return 0; case AMDGPU::sub0: return 0; @@ -756,7 +890,7 @@ void SITargetLowering::adjustWritemask(MachineSDNode *&Node, if (Writemask == (1U << Lane)) { SDValue RC = DAG.getTargetConstant(AMDGPU::VReg_32RegClassID, MVT::i32); SDNode *Copy = DAG.getMachineNode(TargetOpcode::COPY_TO_REGCLASS, - DebugLoc(), MVT::f32, + SDLoc(), Users[Lane]->getValueType(0), SDValue(Node, 0), RC); DAG.ReplaceAllUsesWith(Users[Lane], Copy); return; @@ -784,6 +918,7 @@ void SITargetLowering::adjustWritemask(MachineSDNode *&Node, /// \brief Fold the instructions after selecting them SDNode *SITargetLowering::PostISelFolding(MachineSDNode *Node, SelectionDAG &DAG) const { + Node = AdjustRegClass(Node, DAG); if (AMDGPU::isMIMG(Node->getMachineOpcode()) != -1) adjustWritemask(Node, DAG); @@ -815,3 +950,62 @@ void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr *MI, MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo(); MRI.setRegClass(VReg, RC); } + +MachineSDNode *SITargetLowering::AdjustRegClass(MachineSDNode *N, + SelectionDAG &DAG) const { + + SDLoc DL(N); + unsigned NewOpcode = N->getMachineOpcode(); + + switch (N->getMachineOpcode()) { + default: return N; + case AMDGPU::REG_SEQUENCE: { + // MVT::i128 only uses SGPRs, so i128 REG_SEQUENCEs don't need to be + // rewritten. + if (N->getValueType(0) == MVT::i128) { + return N; + } + const SDValue Ops[] = { + DAG.getTargetConstant(AMDGPU::VReg_64RegClassID, MVT::i32), + N->getOperand(1) , N->getOperand(2), + N->getOperand(3), N->getOperand(4) + }; + return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::i64, Ops); + } + + case AMDGPU::S_LOAD_DWORD_IMM: + NewOpcode = AMDGPU::BUFFER_LOAD_DWORD_ADDR64; + // Fall-through + case AMDGPU::S_LOAD_DWORDX2_SGPR: + if (NewOpcode == N->getMachineOpcode()) { + NewOpcode = AMDGPU::BUFFER_LOAD_DWORDX2_ADDR64; + } + // Fall-through + case AMDGPU::S_LOAD_DWORDX4_IMM: + case AMDGPU::S_LOAD_DWORDX4_SGPR: { + if (NewOpcode == N->getMachineOpcode()) { + NewOpcode = AMDGPU::BUFFER_LOAD_DWORDX4_ADDR64; + } + if (fitsRegClass(DAG, N->getOperand(0), AMDGPU::SReg_64RegClassID)) { + return N; + } + ConstantSDNode *Offset = cast<ConstantSDNode>(N->getOperand(1)); + SDValue Ops[] = { + SDValue(DAG.getMachineNode(AMDGPU::SI_ADDR64_RSRC, DL, MVT::i128, + DAG.getConstant(0, MVT::i64)), 0), + N->getOperand(0), + DAG.getConstant(Offset->getSExtValue() << 2, MVT::i32) + }; + return DAG.getMachineNode(NewOpcode, DL, N->getVTList(), Ops); + } + } +} + +SDValue SITargetLowering::CreateLiveInRegister(SelectionDAG &DAG, + const TargetRegisterClass *RC, + unsigned Reg, EVT VT) const { + SDValue VReg = AMDGPUTargetLowering::CreateLiveInRegister(DAG, RC, Reg, VT); + + return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(DAG.getEntryNode()), + cast<RegisterSDNode>(VReg)->getReg(), VT); +} diff --git a/lib/Target/R600/SIISelLowering.h b/lib/Target/R600/SIISelLowering.h index de637be..78ae6a1 100644 --- a/lib/Target/R600/SIISelLowering.h +++ b/lib/Target/R600/SIISelLowering.h @@ -21,21 +21,22 @@ namespace llvm { class SITargetLowering : public AMDGPUTargetLowering { - const SIInstrInfo * TII; - 
const TargetRegisterInfo * TRI; - - SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerParameter(SelectionDAG &DAG, EVT VT, SDLoc DL, + SDValue Chain, unsigned Offset) const; SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerSIGN_EXTEND(SDValue Op, SelectionDAG &DAG) const; SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG) const; bool foldImm(SDValue &Operand, int32_t &Immediate, bool &ScalarSlotUsed) const; - bool fitsRegClass(SelectionDAG &DAG, SDValue &Op, unsigned RegClass) const; - void ensureSRegLimit(SelectionDAG &DAG, SDValue &Operand, + bool fitsRegClass(SelectionDAG &DAG, const SDValue &Op, + unsigned RegClass) const; + void ensureSRegLimit(SelectionDAG &DAG, SDValue &Operand, unsigned RegClass, bool &ScalarSlotUsed) const; SDNode *foldOperands(MachineSDNode *N, SelectionDAG &DAG) const; void adjustWritemask(MachineSDNode *&N, SelectionDAG &DAG) const; + MachineSDNode *AdjustRegClass(MachineSDNode *N, SelectionDAG &DAG) const; public: SITargetLowering(TargetMachine &tm); @@ -43,12 +44,12 @@ public: SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc DL, SelectionDAG &DAG, + SDLoc DL, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const; virtual MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr * MI, MachineBasicBlock * BB) const; - virtual EVT getSetCCResultType(EVT VT) const; + virtual EVT getSetCCResultType(LLVMContext &Context, EVT VT) const; virtual MVT getScalarShiftAmountTy(EVT VT) const; virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const; virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const; @@ -57,6 +58,8 @@ public: SDNode *Node) const; int32_t analyzeImmediate(const SDNode *N) const; + SDValue CreateLiveInRegister(SelectionDAG &DAG, const TargetRegisterClass *RC, + unsigned Reg, EVT VT) const; }; } // End namespace llvm diff --git a/lib/Target/R600/SIInsertWaits.cpp b/lib/Target/R600/SIInsertWaits.cpp index 98bd3db..c36e1dc 100644 --- a/lib/Target/R600/SIInsertWaits.cpp +++ b/lib/Target/R600/SIInsertWaits.cpp @@ -47,7 +47,7 @@ class SIInsertWaits : public MachineFunctionPass { private: static char ID; const SIInstrInfo *TII; - const SIRegisterInfo &TRI; + const SIRegisterInfo *TRI; const MachineRegisterInfo *MRI; /// \brief Constant hardware limits @@ -97,8 +97,8 @@ private: public: SIInsertWaits(TargetMachine &tm) : MachineFunctionPass(ID), - TII(static_cast<const SIInstrInfo*>(tm.getInstrInfo())), - TRI(TII->getRegisterInfo()) { } + TII(0), + TRI(0) { } virtual bool runOnMachineFunction(MachineFunction &MF); @@ -137,7 +137,7 @@ Counters SIInsertWaits::getHwCounts(MachineInstr &MI) { assert(Op.isReg() && "First LGKM operand must be a register!"); unsigned Reg = Op.getReg(); - unsigned Size = TRI.getMinimalPhysRegClass(Reg)->getSize(); + unsigned Size = TRI->getMinimalPhysRegClass(Reg)->getSize(); Result.Named.LGKM = Size > 4 ? 
2 : 1; } else { @@ -182,12 +182,12 @@ RegInterval SIInsertWaits::getRegInterval(MachineOperand &Op) { return std::make_pair(0, 0); unsigned Reg = Op.getReg(); - unsigned Size = TRI.getMinimalPhysRegClass(Reg)->getSize(); + unsigned Size = TRI->getMinimalPhysRegClass(Reg)->getSize(); assert(Size >= 4); RegInterval Result; - Result.first = TRI.getEncodingValue(Reg); + Result.first = TRI->getEncodingValue(Reg); Result.second = Result.first + Size / 4; return Result; @@ -328,9 +328,11 @@ Counters SIInsertWaits::handleOperands(MachineInstr &MI) { } bool SIInsertWaits::runOnMachineFunction(MachineFunction &MF) { - bool Changes = false; + TII = static_cast<const SIInstrInfo*>(MF.getTarget().getInstrInfo()); + TRI = static_cast<const SIRegisterInfo*>(MF.getTarget().getRegisterInfo()); + MRI = &MF.getRegInfo(); WaitedOn = ZeroCounts; diff --git a/lib/Target/R600/SIInstrFormats.td b/lib/Target/R600/SIInstrFormats.td index f737ddd..51f323d 100644 --- a/lib/Target/R600/SIInstrFormats.td +++ b/lib/Target/R600/SIInstrFormats.td @@ -185,25 +185,25 @@ class VOP2 <bits<6> op, dag outs, dag ins, string asm, list<dag> pattern> : class VOP3 <bits<9> op, dag outs, dag ins, string asm, list<dag> pattern> : Enc64 <outs, ins, asm, pattern> { - bits<8> VDST; - bits<9> SRC0; - bits<9> SRC1; - bits<9> SRC2; - bits<3> ABS; - bits<1> CLAMP; - bits<2> OMOD; - bits<3> NEG; - - let Inst{7-0} = VDST; - let Inst{10-8} = ABS; - let Inst{11} = CLAMP; + bits<8> dst; + bits<9> src0; + bits<9> src1; + bits<9> src2; + bits<3> abs; + bits<1> clamp; + bits<2> omod; + bits<3> neg; + + let Inst{7-0} = dst; + let Inst{10-8} = abs; + let Inst{11} = clamp; let Inst{25-17} = op; let Inst{31-26} = 0x34; //encoding - let Inst{40-32} = SRC0; - let Inst{49-41} = SRC1; - let Inst{58-50} = SRC2; - let Inst{60-59} = OMOD; - let Inst{63-61} = NEG; + let Inst{40-32} = src0; + let Inst{49-41} = src1; + let Inst{58-50} = src2; + let Inst{60-59} = omod; + let Inst{63-61} = neg; let mayLoad = 0; let mayStore = 0; @@ -213,23 +213,23 @@ class VOP3 <bits<9> op, dag outs, dag ins, string asm, list<dag> pattern> : class VOP3b <bits<9> op, dag outs, dag ins, string asm, list<dag> pattern> : Enc64 <outs, ins, asm, pattern> { - bits<8> VDST; - bits<9> SRC0; - bits<9> SRC1; - bits<9> SRC2; - bits<7> SDST; - bits<2> OMOD; - bits<3> NEG; + bits<8> dst; + bits<9> src0; + bits<9> src1; + bits<9> src2; + bits<7> sdst; + bits<2> omod; + bits<3> neg; - let Inst{7-0} = VDST; - let Inst{14-8} = SDST; + let Inst{7-0} = dst; + let Inst{14-8} = sdst; let Inst{25-17} = op; let Inst{31-26} = 0x34; //encoding - let Inst{40-32} = SRC0; - let Inst{49-41} = SRC1; - let Inst{58-50} = SRC2; - let Inst{60-59} = OMOD; - let Inst{63-61} = NEG; + let Inst{40-32} = src0; + let Inst{49-41} = src1; + let Inst{58-50} = src2; + let Inst{60-59} = omod; + let Inst{63-61} = neg; let mayLoad = 0; let mayStore = 0; diff --git a/lib/Target/R600/SIInstrInfo.cpp b/lib/Target/R600/SIInstrInfo.cpp index 9a04c60..cb582a6 100644 --- a/lib/Target/R600/SIInstrInfo.cpp +++ b/lib/Target/R600/SIInstrInfo.cpp @@ -24,7 +24,7 @@ using namespace llvm; SIInstrInfo::SIInstrInfo(AMDGPUTargetMachine &tm) : AMDGPUInstrInfo(tm), - RI(tm, *this) + RI(tm) { } const SIRegisterInfo &SIInstrInfo::getRegisterInfo() const { diff --git a/lib/Target/R600/SIInstrInfo.td b/lib/Target/R600/SIInstrInfo.td index aafc331..42fa95f 100644 --- a/lib/Target/R600/SIInstrInfo.td +++ b/lib/Target/R600/SIInstrInfo.td @@ -26,10 +26,6 @@ def HI32 : SDNodeXForm<imm, [{ return CurDAG->getTargetConstant(N->getZExtValue() >> 32, MVT::i32); 
}]>; -def SIbuffer_store : SDNode<"AMDGPUISD::BUFFER_STORE", - SDTypeProfile<0, 3, [SDTCisPtrTy<1>, SDTCisInt<2>]>, - [SDNPHasChain, SDNPMayStore]>; - def IMM8bitDWORD : ImmLeaf < i32, [{ return (Imm & ~0x3FC) == 0; @@ -39,13 +35,16 @@ def IMM8bitDWORD : ImmLeaf < }]> >; -def IMM12bit : ImmLeaf < - i16, - [{return isUInt<12>(Imm);}] +def as_i16imm : SDNodeXForm<imm, [{ + return CurDAG->getTargetConstant(N->getSExtValue(), MVT::i16); +}]>; + +def IMM12bit : PatLeaf <(imm), + [{return isUInt<12>(N->getZExtValue());}] >; class InlineImm <ValueType vt> : PatLeaf <(vt imm), [{ - return ((const SITargetLowering &)TLI).analyzeImmediate(N) == 0; + return (*(const SITargetLowering *)TLI).analyzeImmediate(N) == 0; }]>; //===----------------------------------------------------------------------===// @@ -163,8 +162,8 @@ multiclass VOP1_Helper <bits<8> op, RegisterClass drc, RegisterClass src, i32imm:$omod, i32imm:$neg), opName#"_e64 $dst, $src0, $abs, $clamp, $omod, $neg", [] >, VOP <opName> { - let SRC1 = SIOperand.ZERO; - let SRC2 = SIOperand.ZERO; + let src1 = SIOperand.ZERO; + let src2 = SIOperand.ZERO; } } @@ -189,7 +188,7 @@ multiclass VOP2_Helper <bits<6> op, RegisterClass vrc, RegisterClass arc, i32imm:$omod, i32imm:$neg), opName#"_e64 $dst, $src0, $src1, $abs, $clamp, $omod, $neg", [] >, VOP <opName>, VOP2_REV<revOp#"_e64", !eq(revOp, opName)> { - let SRC2 = SIOperand.ZERO; + let src2 = SIOperand.ZERO; } } @@ -217,11 +216,11 @@ multiclass VOP2b_32 <bits<6> op, string opName, list<dag> pattern, i32imm:$omod, i32imm:$neg), opName#"_e64 $dst, $src0, $src1, $abs, $clamp, $omod, $neg", [] >, VOP <opName>, VOP2_REV<revOp#"_e64", !eq(revOp, opName)> { - let SRC2 = SIOperand.ZERO; + let src2 = SIOperand.ZERO; /* the VOP2 variant puts the carry out into VCC, the VOP3 variant can write it into any SGPR. 
We currently don't use the carry out, so for now hardcode it to VCC as well */ - let SDST = SIOperand.VCC; + let sdst = SIOperand.VCC; } } @@ -244,7 +243,7 @@ multiclass VOPC_Helper <bits<8> op, RegisterClass vrc, RegisterClass arc, [(set SReg_64:$dst, (i1 (setcc (vt arc:$src0), arc:$src1, cond)))] ) >, VOP <opName> { - let SRC2 = SIOperand.ZERO; + let src2 = SIOperand.ZERO; } } @@ -263,6 +262,19 @@ class VOP3_32 <bits<9> op, string opName, list<dag> pattern> : VOP3 < opName#" $dst, $src0, $src1, $src2, $abs, $clamp, $omod, $neg", pattern >, VOP <opName>; +class VOP3_64_Shift <bits <9> op, string opName, list<dag> pattern> : VOP3 < + op, (outs VReg_64:$dst), + (ins VSrc_64:$src0, VSrc_32:$src1), + opName#" $dst, $src0, $src1", pattern +>, VOP <opName> { + + let src2 = SIOperand.ZERO; + let abs = 0; + let clamp = 0; + let omod = 0; + let neg = 0; +} + class VOP3_64 <bits<9> op, string opName, list<dag> pattern> : VOP3 < op, (outs VReg_64:$dst), (ins VSrc_64:$src0, VSrc_64:$src1, VSrc_64:$src2, @@ -287,31 +299,41 @@ class MTBUF_Store_Helper <bits<3> op, string asm, RegisterClass regClass> : MTBU let mayLoad = 0; } -class MUBUF_Load_Helper <bits<7> op, string asm, RegisterClass regClass> : MUBUF < - op, - (outs regClass:$vdata), - (ins i16imm:$offset, i1imm:$offen, i1imm:$idxen, i1imm:$glc, i1imm:$addr64, - i1imm:$lds, VReg_32:$vaddr, SReg_128:$srsrc, i1imm:$slc, - i1imm:$tfe, SSrc_32:$soffset), - asm#" $vdata, $offset, $offen, $idxen, $glc, $addr64, " - #"$lds, $vaddr, $srsrc, $slc, $tfe, $soffset", - []> { - let mayLoad = 1; - let mayStore = 0; +multiclass MUBUF_Load_Helper <bits<7> op, string asm, RegisterClass regClass> { + + let glc = 0, lds = 0, slc = 0, tfe = 0, soffset = 128 /* ZERO */, + mayLoad = 1 in { + + let offen = 1, idxen = 0, addr64 = 0, offset = 0 in { + def _OFFEN : MUBUF <op, (outs regClass:$vdata), + (ins SReg_128:$srsrc, VReg_32:$vaddr), + asm#" $vdata, $srsrc + $vaddr", []>; + } + + let offen = 0, idxen = 1, addr64 = 0 in { + def _IDXEN : MUBUF <op, (outs regClass:$vdata), + (ins SReg_128:$srsrc, VReg_32:$vaddr, i16imm:$offset), + asm#" $vdata, $srsrc[$vaddr] + $offset", []>; + } + + let offen = 0, idxen = 0, addr64 = 1 in { + def _ADDR64 : MUBUF <op, (outs regClass:$vdata), + (ins SReg_128:$srsrc, VReg_64:$vaddr, i16imm:$offset), + asm#" $vdata, $srsrc + $vaddr + $offset", []>; + } + } } class MUBUF_Store_Helper <bits<7> op, string name, RegisterClass vdataClass, ValueType VT> : - MUBUF <op, (outs), (ins vdataClass:$vdata, SReg_128:$srsrc, VReg_64:$vaddr), - name#" $vdata, $srsrc + $vaddr", - [(SIbuffer_store (VT vdataClass:$vdata), (i128 SReg_128:$srsrc), - (i64 VReg_64:$vaddr))]> { + MUBUF <op, (outs), (ins vdataClass:$vdata, SReg_128:$srsrc, VReg_64:$vaddr, i16imm:$offset), + name#" $vdata, $srsrc + $vaddr + $offset", + []> { let mayLoad = 0; let mayStore = 1; // Encoding - let offset = 0; let offen = 0; let idxen = 0; let glc = 0; @@ -335,7 +357,22 @@ class MTBUF_Load_Helper <bits<3> op, string asm, RegisterClass regClass> : MTBUF let mayStore = 0; } -class MIMG_Load_Helper <bits<7> op, string asm> : MIMG < +class MIMG_NoSampler_Helper <bits<7> op, string asm> : MIMG < + op, + (outs VReg_128:$vdata), + (ins i32imm:$dmask, i1imm:$unorm, i1imm:$glc, i1imm:$da, i1imm:$r128, + i1imm:$tfe, i1imm:$lwe, i1imm:$slc, unknown:$vaddr, + SReg_256:$srsrc), + asm#" $vdata, $dmask, $unorm, $glc, $da, $r128," + #" $tfe, $lwe, $slc, $vaddr, $srsrc", + []> { + let SSAMP = 0; + let mayLoad = 1; + let mayStore = 0; + let hasPostISelHook = 1; +} + +class MIMG_Sampler_Helper <bits<7> 
op, string asm> : MIMG < op, (outs VReg_128:$vdata), (ins i32imm:$dmask, i1imm:$unorm, i1imm:$glc, i1imm:$da, i1imm:$r128, @@ -382,7 +419,7 @@ def getCommuteOrig : InstrMapping { // Test if the supplied opcode is an MIMG instruction def isMIMG : InstrMapping { - let FilterClass = "MIMG_Load_Helper"; + let FilterClass = "MIMG"; let RowFields = ["Inst"]; let ColFields = ["Size"]; let KeyCol = ["8"]; diff --git a/lib/Target/R600/SIInstructions.td b/lib/Target/R600/SIInstructions.td index 3ff4548..e8ed2dd 100644 --- a/lib/Target/R600/SIInstructions.td +++ b/lib/Target/R600/SIInstructions.td @@ -22,8 +22,8 @@ def InterpSlot : Operand<i32> { let PrintMethod = "printInterpSlot"; } -def isSI : Predicate<"Subtarget.device()" - "->getGeneration() == AMDGPUDeviceInfo::HD7XXX">; +def isSI : Predicate<"Subtarget.getGeneration() " + "== AMDGPUSubtarget::SOUTHERN_ISLANDS">; let Predicates = [isSI] in { @@ -394,18 +394,18 @@ defm V_CMPX_CLASS_F64 : VOPC_64 <0x000000b8, "V_CMPX_CLASS_F64">; //def BUFFER_LOAD_FORMAT_X : MUBUF_ <0x00000000, "BUFFER_LOAD_FORMAT_X", []>; //def BUFFER_LOAD_FORMAT_XY : MUBUF_ <0x00000001, "BUFFER_LOAD_FORMAT_XY", []>; //def BUFFER_LOAD_FORMAT_XYZ : MUBUF_ <0x00000002, "BUFFER_LOAD_FORMAT_XYZ", []>; -def BUFFER_LOAD_FORMAT_XYZW : MUBUF_Load_Helper <0x00000003, "BUFFER_LOAD_FORMAT_XYZW", VReg_128>; +defm BUFFER_LOAD_FORMAT_XYZW : MUBUF_Load_Helper <0x00000003, "BUFFER_LOAD_FORMAT_XYZW", VReg_128>; //def BUFFER_STORE_FORMAT_X : MUBUF_ <0x00000004, "BUFFER_STORE_FORMAT_X", []>; //def BUFFER_STORE_FORMAT_XY : MUBUF_ <0x00000005, "BUFFER_STORE_FORMAT_XY", []>; //def BUFFER_STORE_FORMAT_XYZ : MUBUF_ <0x00000006, "BUFFER_STORE_FORMAT_XYZ", []>; //def BUFFER_STORE_FORMAT_XYZW : MUBUF_ <0x00000007, "BUFFER_STORE_FORMAT_XYZW", []>; -//def BUFFER_LOAD_UBYTE : MUBUF_ <0x00000008, "BUFFER_LOAD_UBYTE", []>; +defm BUFFER_LOAD_UBYTE : MUBUF_Load_Helper <0x00000008, "BUFFER_LOAD_UBYTE", VReg_32>; //def BUFFER_LOAD_SBYTE : MUBUF_ <0x00000009, "BUFFER_LOAD_SBYTE", []>; //def BUFFER_LOAD_USHORT : MUBUF_ <0x0000000a, "BUFFER_LOAD_USHORT", []>; //def BUFFER_LOAD_SSHORT : MUBUF_ <0x0000000b, "BUFFER_LOAD_SSHORT", []>; -def BUFFER_LOAD_DWORD : MUBUF_Load_Helper <0x0000000c, "BUFFER_LOAD_DWORD", VReg_32>; -def BUFFER_LOAD_DWORDX2 : MUBUF_Load_Helper <0x0000000d, "BUFFER_LOAD_DWORDX2", VReg_64>; -def BUFFER_LOAD_DWORDX4 : MUBUF_Load_Helper <0x0000000e, "BUFFER_LOAD_DWORDX4", VReg_128>; +defm BUFFER_LOAD_DWORD : MUBUF_Load_Helper <0x0000000c, "BUFFER_LOAD_DWORD", VReg_32>; +defm BUFFER_LOAD_DWORDX2 : MUBUF_Load_Helper <0x0000000d, "BUFFER_LOAD_DWORDX2", VReg_64>; +defm BUFFER_LOAD_DWORDX4 : MUBUF_Load_Helper <0x0000000e, "BUFFER_LOAD_DWORDX4", VReg_128>; //def BUFFER_STORE_BYTE : MUBUF_ <0x00000018, "BUFFER_STORE_BYTE", []>; //def BUFFER_STORE_SHORT : MUBUF_ <0x0000001a, "BUFFER_STORE_SHORT", []>; @@ -416,7 +416,10 @@ def BUFFER_STORE_DWORD : MUBUF_Store_Helper < def BUFFER_STORE_DWORDX2 : MUBUF_Store_Helper < 0x0000001d, "BUFFER_STORE_DWORDX2", VReg_64, i64 >; -//def BUFFER_STORE_DWORDX4 : MUBUF_DWORDX4 <0x0000001e, "BUFFER_STORE_DWORDX4", []>; + +def BUFFER_STORE_DWORDX4 : MUBUF_Store_Helper < + 0x0000001e, "BUFFER_STORE_DWORDX4", VReg_128, v4i32 +>; //def BUFFER_ATOMIC_SWAP : MUBUF_ <0x00000030, "BUFFER_ATOMIC_SWAP", []>; //def BUFFER_ATOMIC_CMPSWAP : MUBUF_ <0x00000031, "BUFFER_ATOMIC_CMPSWAP", []>; //def BUFFER_ATOMIC_ADD : MUBUF_ <0x00000032, "BUFFER_ATOMIC_ADD", []>; @@ -495,7 +498,7 @@ defm S_BUFFER_LOAD_DWORDX16 : SMRD_Helper < //def S_MEMTIME : SMRD_ <0x0000001e, "S_MEMTIME", []>; //def 
S_DCACHE_INV : SMRD_ <0x0000001f, "S_DCACHE_INV", []>; //def IMAGE_LOAD : MIMG_NoPattern_ <"IMAGE_LOAD", 0x00000000>; -//def IMAGE_LOAD_MIP : MIMG_NoPattern_ <"IMAGE_LOAD_MIP", 0x00000001>; +def IMAGE_LOAD_MIP : MIMG_NoSampler_Helper <0x00000001, "IMAGE_LOAD_MIP">; //def IMAGE_LOAD_PCK : MIMG_NoPattern_ <"IMAGE_LOAD_PCK", 0x00000002>; //def IMAGE_LOAD_PCK_SGN : MIMG_NoPattern_ <"IMAGE_LOAD_PCK_SGN", 0x00000003>; //def IMAGE_LOAD_MIP_PCK : MIMG_NoPattern_ <"IMAGE_LOAD_MIP_PCK", 0x00000004>; @@ -504,7 +507,7 @@ defm S_BUFFER_LOAD_DWORDX16 : SMRD_Helper < //def IMAGE_STORE_MIP : MIMG_NoPattern_ <"IMAGE_STORE_MIP", 0x00000009>; //def IMAGE_STORE_PCK : MIMG_NoPattern_ <"IMAGE_STORE_PCK", 0x0000000a>; //def IMAGE_STORE_MIP_PCK : MIMG_NoPattern_ <"IMAGE_STORE_MIP_PCK", 0x0000000b>; -//def IMAGE_GET_RESINFO : MIMG_NoPattern_ <"IMAGE_GET_RESINFO", 0x0000000e>; +def IMAGE_GET_RESINFO : MIMG_NoSampler_Helper <0x0000000e, "IMAGE_GET_RESINFO">; //def IMAGE_ATOMIC_SWAP : MIMG_NoPattern_ <"IMAGE_ATOMIC_SWAP", 0x0000000f>; //def IMAGE_ATOMIC_CMPSWAP : MIMG_NoPattern_ <"IMAGE_ATOMIC_CMPSWAP", 0x00000010>; //def IMAGE_ATOMIC_ADD : MIMG_NoPattern_ <"IMAGE_ATOMIC_ADD", 0x00000011>; @@ -522,20 +525,20 @@ defm S_BUFFER_LOAD_DWORDX16 : SMRD_Helper < //def IMAGE_ATOMIC_FCMPSWAP : MIMG_NoPattern_ <"IMAGE_ATOMIC_FCMPSWAP", 0x0000001d>; //def IMAGE_ATOMIC_FMIN : MIMG_NoPattern_ <"IMAGE_ATOMIC_FMIN", 0x0000001e>; //def IMAGE_ATOMIC_FMAX : MIMG_NoPattern_ <"IMAGE_ATOMIC_FMAX", 0x0000001f>; -def IMAGE_SAMPLE : MIMG_Load_Helper <0x00000020, "IMAGE_SAMPLE">; +def IMAGE_SAMPLE : MIMG_Sampler_Helper <0x00000020, "IMAGE_SAMPLE">; //def IMAGE_SAMPLE_CL : MIMG_NoPattern_ <"IMAGE_SAMPLE_CL", 0x00000021>; -def IMAGE_SAMPLE_D : MIMG_Load_Helper <0x00000022, "IMAGE_SAMPLE_D">; +def IMAGE_SAMPLE_D : MIMG_Sampler_Helper <0x00000022, "IMAGE_SAMPLE_D">; //def IMAGE_SAMPLE_D_CL : MIMG_NoPattern_ <"IMAGE_SAMPLE_D_CL", 0x00000023>; -def IMAGE_SAMPLE_L : MIMG_Load_Helper <0x00000024, "IMAGE_SAMPLE_L">; -def IMAGE_SAMPLE_B : MIMG_Load_Helper <0x00000025, "IMAGE_SAMPLE_B">; +def IMAGE_SAMPLE_L : MIMG_Sampler_Helper <0x00000024, "IMAGE_SAMPLE_L">; +def IMAGE_SAMPLE_B : MIMG_Sampler_Helper <0x00000025, "IMAGE_SAMPLE_B">; //def IMAGE_SAMPLE_B_CL : MIMG_NoPattern_ <"IMAGE_SAMPLE_B_CL", 0x00000026>; //def IMAGE_SAMPLE_LZ : MIMG_NoPattern_ <"IMAGE_SAMPLE_LZ", 0x00000027>; -def IMAGE_SAMPLE_C : MIMG_Load_Helper <0x00000028, "IMAGE_SAMPLE_C">; +def IMAGE_SAMPLE_C : MIMG_Sampler_Helper <0x00000028, "IMAGE_SAMPLE_C">; //def IMAGE_SAMPLE_C_CL : MIMG_NoPattern_ <"IMAGE_SAMPLE_C_CL", 0x00000029>; //def IMAGE_SAMPLE_C_D : MIMG_NoPattern_ <"IMAGE_SAMPLE_C_D", 0x0000002a>; //def IMAGE_SAMPLE_C_D_CL : MIMG_NoPattern_ <"IMAGE_SAMPLE_C_D_CL", 0x0000002b>; -def IMAGE_SAMPLE_C_L : MIMG_Load_Helper <0x0000002c, "IMAGE_SAMPLE_C_L">; -def IMAGE_SAMPLE_C_B : MIMG_Load_Helper <0x0000002d, "IMAGE_SAMPLE_C_B">; +def IMAGE_SAMPLE_C_L : MIMG_Sampler_Helper <0x0000002c, "IMAGE_SAMPLE_C_L">; +def IMAGE_SAMPLE_C_B : MIMG_Sampler_Helper <0x0000002d, "IMAGE_SAMPLE_C_B">; //def IMAGE_SAMPLE_C_B_CL : MIMG_NoPattern_ <"IMAGE_SAMPLE_C_B_CL", 0x0000002e>; //def IMAGE_SAMPLE_C_LZ : MIMG_NoPattern_ <"IMAGE_SAMPLE_C_LZ", 0x0000002f>; //def IMAGE_SAMPLE_O : MIMG_NoPattern_ <"IMAGE_SAMPLE_O", 0x00000030>; @@ -602,7 +605,9 @@ defm V_READFIRSTLANE_B32 : VOP1_32 <0x00000002, "V_READFIRSTLANE_B32", []>; defm V_CVT_F32_I32 : VOP1_32 <0x00000005, "V_CVT_F32_I32", [(set f32:$dst, (sint_to_fp i32:$src0))] >; -defm V_CVT_F32_U32 : VOP1_32 <0x00000006, "V_CVT_F32_U32", []>; +defm V_CVT_F32_U32 
: VOP1_32 <0x00000006, "V_CVT_F32_U32", + [(set f32:$dst, (uint_to_fp i32:$src0))] +>; defm V_CVT_U32_F32 : VOP1_32 <0x00000007, "V_CVT_U32_F32", []>; defm V_CVT_I32_F32 : VOP1_32 <0x00000008, "V_CVT_I32_F32", [(set i32:$dst, (fp_to_sint f32:$src0))] @@ -624,7 +629,9 @@ defm V_MOV_FED_B32 : VOP1_32 <0x00000009, "V_MOV_FED_B32", []>; defm V_FRACT_F32 : VOP1_32 <0x00000020, "V_FRACT_F32", [(set f32:$dst, (AMDGPUfract f32:$src0))] >; -defm V_TRUNC_F32 : VOP1_32 <0x00000021, "V_TRUNC_F32", []>; +defm V_TRUNC_F32 : VOP1_32 <0x00000021, "V_TRUNC_F32", + [(set f32:$dst, (int_AMDGPU_trunc f32:$src0))] +>; defm V_CEIL_F32 : VOP1_32 <0x00000022, "V_CEIL_F32", [(set f32:$dst, (fceil f32:$src0))] >; @@ -848,10 +855,18 @@ defm V_MAX_LEGACY_F32 : VOP2_32 <0x0000000e, "V_MAX_LEGACY_F32", defm V_MIN_F32 : VOP2_32 <0x0000000f, "V_MIN_F32", []>; defm V_MAX_F32 : VOP2_32 <0x00000010, "V_MAX_F32", []>; -defm V_MIN_I32 : VOP2_32 <0x00000011, "V_MIN_I32", []>; -defm V_MAX_I32 : VOP2_32 <0x00000012, "V_MAX_I32", []>; -defm V_MIN_U32 : VOP2_32 <0x00000013, "V_MIN_U32", []>; -defm V_MAX_U32 : VOP2_32 <0x00000014, "V_MAX_U32", []>; +defm V_MIN_I32 : VOP2_32 <0x00000011, "V_MIN_I32", + [(set i32:$dst, (AMDGPUsmin i32:$src0, i32:$src1))] +>; +defm V_MAX_I32 : VOP2_32 <0x00000012, "V_MAX_I32", + [(set i32:$dst, (AMDGPUsmax i32:$src0, i32:$src1))] +>; +defm V_MIN_U32 : VOP2_32 <0x00000013, "V_MIN_U32", + [(set i32:$dst, (AMDGPUumin i32:$src0, i32:$src1))] +>; +defm V_MAX_U32 : VOP2_32 <0x00000014, "V_MAX_U32", + [(set i32:$dst, (AMDGPUumax i32:$src0, i32:$src1))] +>; defm V_LSHR_B32 : VOP2_32 <0x00000015, "V_LSHR_B32", [(set i32:$dst, (srl i32:$src0, i32:$src1))] @@ -952,6 +967,8 @@ def V_FMA_F32 : VOP3_32 <0x0000014b, "V_FMA_F32", []>; def V_FMA_F64 : VOP3_64 <0x0000014c, "V_FMA_F64", []>; //def V_LERP_U8 : VOP3_U8 <0x0000014d, "V_LERP_U8", []>; def V_ALIGNBIT_B32 : VOP3_32 <0x0000014e, "V_ALIGNBIT_B32", []>; +def : ROTRPattern <V_ALIGNBIT_B32>; + def V_ALIGNBYTE_B32 : VOP3_32 <0x0000014f, "V_ALIGNBYTE_B32", []>; def V_MULLIT_F32 : VOP3_32 <0x00000150, "V_MULLIT_F32", []>; ////def V_MIN3_F32 : VOP3_MIN3 <0x00000151, "V_MIN3_F32", []>; @@ -970,9 +987,15 @@ def V_SAD_U32 : VOP3_32 <0x0000015d, "V_SAD_U32", []>; ////def V_CVT_PK_U8_F32 : VOP3_U8 <0x0000015e, "V_CVT_PK_U8_F32", []>; def V_DIV_FIXUP_F32 : VOP3_32 <0x0000015f, "V_DIV_FIXUP_F32", []>; def V_DIV_FIXUP_F64 : VOP3_64 <0x00000160, "V_DIV_FIXUP_F64", []>; -def V_LSHL_B64 : VOP3_64 <0x00000161, "V_LSHL_B64", []>; -def V_LSHR_B64 : VOP3_64 <0x00000162, "V_LSHR_B64", []>; -def V_ASHR_I64 : VOP3_64 <0x00000163, "V_ASHR_I64", []>; + +def V_LSHL_B64 : VOP3_64_Shift <0x00000161, "V_LSHL_B64", + [(set i64:$dst, (shl i64:$src0, i32:$src1))] +>; +def V_LSHR_B64 : VOP3_64_Shift <0x00000162, "V_LSHR_B64", + [(set i64:$dst, (srl i64:$src0, i32:$src1))] +>; +def V_ASHR_I64 : VOP3_64_Shift <0x00000163, "V_ASHR_I64", []>; + def V_ADD_F64 : VOP3_64 <0x00000164, "V_ADD_F64", []>; def V_MUL_F64 : VOP3_64 <0x00000165, "V_MUL_F64", []>; def V_MIN_F64 : VOP3_64 <0x00000166, "V_MIN_F64", []>; @@ -1180,6 +1203,19 @@ def SI_INDIRECT_DST_V16 : SI_INDIRECT_DST<VReg_512>; } // Uses = [EXEC,VCC,M0], Defs = [EXEC,VCC,M0] +// This pseudo instruction takes a pointer as input and outputs a resource +// constant that can be used with the ADDR64 MUBUF instructions. 
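For reference, the custom inserter shown earlier in SIISelLowering.cpp materializes this pseudo as a 128-bit value whose low qword is the base pointer and whose high qword carries the buffer data format constant. A host-side sketch of that layout (the struct and helper are illustrative only; the RSRC_DATA_FORMAT value matches the constant defined in SIISelLowering.cpp):

#include <cstdint>

const uint64_t RSRC_DATA_FORMAT = 0xf00000000000ULL;

struct BufferRsrc { uint32_t Word[4]; }; // the v4i32 resource constant

BufferRsrc makeAddr64Rsrc(uint64_t BasePtr) {
  BufferRsrc R;
  R.Word[0] = static_cast<uint32_t>(BasePtr);       // sub0: pointer, low half
  R.Word[1] = static_cast<uint32_t>(BasePtr >> 32); // sub1: pointer, high half
  R.Word[2] = 0;                                    // sub2: S_MOV_B32 0
  R.Word[3] = static_cast<uint32_t>(RSRC_DATA_FORMAT >> 32); // sub3: 0xf000
  return R;
}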
+ +let usesCustomInserter = 1 in { + +def SI_ADDR64_RSRC : InstSI < + (outs SReg_128:$srsrc), + (ins SReg_64:$ptr), + "", [] +>; + +} // end usesCustomInserter + } // end IsCodeGenOnly, isPseudo def : Pat< @@ -1194,10 +1230,8 @@ def : Pat < /* int_SI_vs_load_input */ def : Pat< - (int_SI_vs_load_input v16i8:$tlst, IMM12bit:$attr_offset, - i32:$buf_idx_vgpr), - (BUFFER_LOAD_FORMAT_XYZW imm:$attr_offset, 0, 1, 0, 0, 0, - $buf_idx_vgpr, $tlst, 0, 0, 0) + (int_SI_vs_load_input v16i8:$tlst, IMM12bit:$attr_offset, i32:$buf_idx_vgpr), + (BUFFER_LOAD_FORMAT_XYZW_IDXEN $tlst, $buf_idx_vgpr, imm:$attr_offset) >; /* int_SI_export */ @@ -1269,6 +1303,36 @@ defm : SamplePatterns<v4i32>; defm : SamplePatterns<v8i32>; defm : SamplePatterns<v16i32>; +/* int_SI_imageload for texture fetches consuming varying address parameters */ +class ImageLoadPattern<Intrinsic name, MIMG opcode, ValueType addr_type> : Pat < + (name addr_type:$addr, v32i8:$rsrc, imm), + (opcode 0xf, 0, 0, 0, 0, 0, 0, 0, $addr, $rsrc) +>; + +class ImageLoadArrayPattern<Intrinsic name, MIMG opcode, ValueType addr_type> : Pat < + (name addr_type:$addr, v32i8:$rsrc, TEX_ARRAY), + (opcode 0xf, 0, 0, 1, 0, 0, 0, 0, $addr, $rsrc) +>; + +multiclass ImageLoadPatterns<ValueType addr_type> { + def : ImageLoadPattern <int_SI_imageload, IMAGE_LOAD_MIP, addr_type>; + def : ImageLoadArrayPattern <int_SI_imageload, IMAGE_LOAD_MIP, addr_type>; +} + +defm : ImageLoadPatterns<v2i32>; +defm : ImageLoadPatterns<v4i32>; + +/* Image resource information */ +def : Pat < + (int_SI_resinfo i32:$mipid, v32i8:$rsrc, imm), + (IMAGE_GET_RESINFO 0xf, 0, 0, 0, 0, 0, 0, 0, (V_MOV_B32_e32 $mipid), $rsrc) +>; + +def : Pat < + (int_SI_resinfo i32:$mipid, v32i8:$rsrc, TEX_ARRAY), + (IMAGE_GET_RESINFO 0xf, 0, 0, 1, 0, 0, 0, 0, (V_MOV_B32_e32 $mipid), $rsrc) +>; + /********** ============================================ **********/ /********** Extraction, Insertion, Building and Casting **********/ /********** ============================================ **********/ @@ -1492,7 +1556,7 @@ def : Pat < // 3. 
Offset in a 32Bit VGPR def : Pat < (int_SI_load_const v16i8:$sbase, i32:$voff), - (BUFFER_LOAD_DWORD 0, 1, 0, 0, 0, 0, $voff, $sbase, 0, 0, 0) + (BUFFER_LOAD_DWORD_OFFEN $sbase, $voff) >; // The multiplication scales from [0,1] to the unsigned integer range @@ -1539,9 +1603,59 @@ multiclass SMRD_Pattern <SMRD Instr_IMM, SMRD Instr_SGPR, ValueType vt> { defm : SMRD_Pattern <S_LOAD_DWORD_IMM, S_LOAD_DWORD_SGPR, f32>; defm : SMRD_Pattern <S_LOAD_DWORD_IMM, S_LOAD_DWORD_SGPR, i32>; +defm : SMRD_Pattern <S_LOAD_DWORDX2_IMM, S_LOAD_DWORDX2_SGPR, i64>; defm : SMRD_Pattern <S_LOAD_DWORDX4_IMM, S_LOAD_DWORDX4_SGPR, v16i8>; defm : SMRD_Pattern <S_LOAD_DWORDX8_IMM, S_LOAD_DWORDX8_SGPR, v32i8>; +//===----------------------------------------------------------------------===// +// MUBUF Patterns +//===----------------------------------------------------------------------===// + +multiclass MUBUFLoad_Pattern <MUBUF Instr_ADDR64, ValueType vt, + PatFrag global_ld, PatFrag constant_ld> { + def : Pat < + (vt (global_ld (add i64:$ptr, (i64 IMM12bit:$offset)))), + (Instr_ADDR64 (SI_ADDR64_RSRC (i64 0)), $ptr, (as_i16imm $offset)) + >; + + def : Pat < + (vt (global_ld i64:$ptr)), + (Instr_ADDR64 (SI_ADDR64_RSRC (i64 0)), $ptr, 0) + >; + + def : Pat < + (vt (global_ld (add i64:$ptr, i64:$offset))), + (Instr_ADDR64 (SI_ADDR64_RSRC $ptr), $offset, 0) + >; + + def : Pat < + (vt (constant_ld (add i64:$ptr, i64:$offset))), + (Instr_ADDR64 (SI_ADDR64_RSRC $ptr), $offset, 0) + >; +} + +defm : MUBUFLoad_Pattern <BUFFER_LOAD_DWORD_ADDR64, i32, + global_load, constant_load>; +defm : MUBUFLoad_Pattern <BUFFER_LOAD_UBYTE_ADDR64, i32, + zextloadi8_global, zextloadi8_constant>; + +multiclass MUBUFStore_Pattern <MUBUF Instr, ValueType vt> { + + def : Pat < + (global_store vt:$value, i64:$ptr), + (Instr $value, (SI_ADDR64_RSRC (i64 0)), $ptr, 0) + >; + + def : Pat < + (global_store vt:$value, (add i64:$ptr, i64:$offset)), + (Instr $value, (SI_ADDR64_RSRC $ptr), $offset, 0) + >; +} + +defm : MUBUFStore_Pattern <BUFFER_STORE_DWORD, i32>; +defm : MUBUFStore_Pattern <BUFFER_STORE_DWORDX2, i64>; +defm : MUBUFStore_Pattern <BUFFER_STORE_DWORDX4, v4i32>; + /********** ====================== **********/ /********** Indirect addressing **********/ /********** ====================== **********/ @@ -1592,4 +1706,10 @@ def : Pat< (V_CMP_U_F32_e64 $src0, $src1) >; +//============================================================================// +// Miscellaneous Optimization Patterns +//============================================================================// + +def : SHA256MaPattern <V_BFI_B32, V_XOR_B32_e32>; + } // End isSI predicate diff --git a/lib/Target/R600/SIIntrinsics.td b/lib/Target/R600/SIIntrinsics.td index 16d9d81..224cd2f 100644 --- a/lib/Target/R600/SIIntrinsics.td +++ b/lib/Target/R600/SIIntrinsics.td @@ -25,6 +25,10 @@ let TargetPrefix = "SI", isTarget = 1 in { def int_SI_sampleb : Sample; def int_SI_samplel : Sample; + def int_SI_imageload : Intrinsic <[llvm_v4i32_ty], [llvm_anyvector_ty, llvm_v32i8_ty, llvm_i32_ty], [IntrNoMem]>; + + def int_SI_resinfo : Intrinsic <[llvm_v4i32_ty], [llvm_i32_ty, llvm_v32i8_ty, llvm_i32_ty], [IntrNoMem]>; + /* Interpolation Intrinsics */ def int_SI_fs_constant : Intrinsic <[llvm_float_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>; diff --git a/lib/Target/R600/SILowerControlFlow.cpp b/lib/Target/R600/SILowerControlFlow.cpp index 2b60eb9..5b434fb 100644 --- a/lib/Target/R600/SILowerControlFlow.cpp +++ b/lib/Target/R600/SILowerControlFlow.cpp @@ -91,8 +91,7 @@ private: 
public: SILowerControlFlowPass(TargetMachine &tm) : - MachineFunctionPass(ID), TRI(tm.getRegisterInfo()), - TII(tm.getInstrInfo()) { } + MachineFunctionPass(ID), TRI(0), TII(0) { } virtual bool runOnMachineFunction(MachineFunction &MF); @@ -408,6 +407,8 @@ void SILowerControlFlowPass::IndirectDst(MachineInstr &MI) { } bool SILowerControlFlowPass::runOnMachineFunction(MachineFunction &MF) { + TII = MF.getTarget().getInstrInfo(); + TRI = MF.getTarget().getRegisterInfo(); bool HaveKill = false; bool NeedWQM = false; diff --git a/lib/Target/R600/SIRegisterInfo.cpp b/lib/Target/R600/SIRegisterInfo.cpp index 99278ae..ddfc54e 100644 --- a/lib/Target/R600/SIRegisterInfo.cpp +++ b/lib/Target/R600/SIRegisterInfo.cpp @@ -18,11 +18,9 @@ using namespace llvm; -SIRegisterInfo::SIRegisterInfo(AMDGPUTargetMachine &tm, - const TargetInstrInfo &tii) -: AMDGPURegisterInfo(tm, tii), - TM(tm), - TII(tii) +SIRegisterInfo::SIRegisterInfo(AMDGPUTargetMachine &tm) +: AMDGPURegisterInfo(tm), + TM(tm) { } BitVector SIRegisterInfo::getReservedRegs(const MachineFunction &MF) const { diff --git a/lib/Target/R600/SIRegisterInfo.h b/lib/Target/R600/SIRegisterInfo.h index caec228..c322f94 100644 --- a/lib/Target/R600/SIRegisterInfo.h +++ b/lib/Target/R600/SIRegisterInfo.h @@ -21,13 +21,11 @@ namespace llvm { class AMDGPUTargetMachine; -class TargetInstrInfo; struct SIRegisterInfo : public AMDGPURegisterInfo { AMDGPUTargetMachine &TM; - const TargetInstrInfo &TII; - SIRegisterInfo(AMDGPUTargetMachine &tm, const TargetInstrInfo &tii); + SIRegisterInfo(AMDGPUTargetMachine &tm); virtual BitVector getReservedRegs(const MachineFunction &MF) const; diff --git a/lib/Target/R600/TargetInfo/AMDGPUTargetInfo.cpp b/lib/Target/R600/TargetInfo/AMDGPUTargetInfo.cpp index 46b1f18..f437564 100644 --- a/lib/Target/R600/TargetInfo/AMDGPUTargetInfo.cpp +++ b/lib/Target/R600/TargetInfo/AMDGPUTargetInfo.cpp @@ -11,7 +11,7 @@ // //===----------------------------------------------------------------------===// -#include "AMDGPU.h" +#include "AMDGPUTargetMachine.h" #include "llvm/Support/TargetRegistry.h" using namespace llvm;
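A recurring change in this commit (SIInsertWaits above, SILowerControlFlowPass here, and the register-info constructors) is that cached TargetInstrInfo/TargetRegisterInfo pointers move out of pass constructors and into runOnMachineFunction, so a pass no longer needs a usable TargetMachine at construction time. The shape of that pattern, sketched with a hypothetical pass name and a simplified constructor:

#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"

using namespace llvm;

namespace {
class ExamplePass : public MachineFunctionPass { // hypothetical name
  static char ID;
  const TargetInstrInfo *TII;
public:
  // Nothing target-specific is touched here; the cached pointer starts null.
  ExamplePass() : MachineFunctionPass(ID), TII(0) {}
  virtual bool runOnMachineFunction(MachineFunction &MF) {
    TII = MF.getTarget().getInstrInfo(); // resolved per function instead
    return false;
  }
};
char ExamplePass::ID = 0;
} // end anonymous namespace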
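Also worth spelling out is the compute-argument layout that LowerParameter and the INTRINSIC_WO_CHAIN lowering above rely on: SGPR0_SGPR1 holds a pointer into constant memory, the first 36 bytes are nine i32 dispatch values (ngroups, global size, and local size, each with x/y/z components), and explicit kernel arguments start at byte 36. A sketch of those offsets (the enum and helper names are illustrative; the values come from the switch in LowerOperation and the "+ 36" in LowerFormalArguments):

#include <cstdint>

// Byte offsets of the implicit dispatch parameters read by the
// r600_read_* intrinsics.
enum ImplicitArgOffset {
  NGROUPS_X = 0,      NGROUPS_Y = 4,      NGROUPS_Z = 8,
  GLOBAL_SIZE_X = 12, GLOBAL_SIZE_Y = 16, GLOBAL_SIZE_Z = 20,
  LOCAL_SIZE_X = 24,  LOCAL_SIZE_Y = 28,  LOCAL_SIZE_Z = 32,
  KERNARG_FIRST_EXPLICIT = 36 // first explicit kernel argument
};

// Offset of an explicit argument given its calling-convention offset
// (VA.getLocMemOffset() in LowerFormalArguments).
uint64_t kernargByteOffset(uint64_t CCOffset) {
  return KERNARG_FIRST_EXPLICIT + CCOffset;
}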