//===-- AMDGPUInstructions.td - Common instruction defs ---*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains instruction defs that are common to all hw codegen
// targets.
//
//===----------------------------------------------------------------------===//

// NOTE(review): the copy under review had every angle-bracket argument list
// stripped (class template parameters, template arguments, and C++ cast type
// arguments such as cast<LoadSDNode>), apparently by an HTML-unescaping step.
// They have been reconstructed below from the parameter usage inside each
// body and from SelectionDAG conventions -- confirm against the upstream
// revision this file was taken from.

// Base class for all AMDGPU instructions.  Carries the register-load/store
// pseudo markers in the top two TSFlags bits so MachineInstr-level queries
// can test them without a side table.
class AMDGPUInst <dag outs, dag ins, string asm, list<dag> pattern>
    : Instruction {
  field bit isRegisterLoad = 0;
  field bit isRegisterStore = 0;

  let Namespace = "AMDGPU";
  let OutOperandList = outs;
  let InOperandList = ins;
  let AsmString = asm;
  let Pattern = pattern;
  let Itinerary = NullALU;

  let TSFlags{63} = isRegisterLoad;
  let TSFlags{62} = isRegisterStore;
}

class AMDGPUShaderInst <dag outs, dag ins, string asm, list<dag> pattern>
    : AMDGPUInst<outs, ins, asm, pattern> {

  field bits<32> Inst = 0xffffffff;
}

def FP32Denormals : Predicate<"Subtarget.hasFP32Denormals()">;
def FP64Denormals : Predicate<"Subtarget.hasFP64Denormals()">;
def UnsafeFPMath : Predicate<"TM.Options.UnsafeFPMath">;

def InstFlag : OperandWithDefaultOps <i32, (ops (i32 0))>;
def ADDRIndirect : ComplexPattern<iPTR, 2, "SelectADDRIndirect", [], []>;

let OperandType = "OPERAND_IMMEDIATE" in {

def u32imm : Operand<i32> {
  let PrintMethod = "printU32ImmOperand";
}

def u16imm : Operand<i16> {
  let PrintMethod = "printU16ImmOperand";
}

def u8imm : Operand<i8> {
  let PrintMethod = "printU8ImmOperand";
}

} // End OperandType = "OPERAND_IMMEDIATE"

//===--------------------------------------------------------------------===//
// Custom Operands
//===--------------------------------------------------------------------===//
def brtarget   : Operand<OtherVT>;

//===----------------------------------------------------------------------===//
// PatLeafs for floating-point comparisons
//===----------------------------------------------------------------------===//

// Each ordered leaf also accepts the don't-care form (e.g. SETEQ) so a
// single instruction pattern covers both condition-code spellings.
def COND_OEQ : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETOEQ || N->get() == ISD::SETEQ;}]
>;

def COND_ONE : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETONE || N->get() == ISD::SETNE;}]
>;

def COND_OGT : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETOGT || N->get() == ISD::SETGT;}]
>;

def COND_OGE : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETOGE || N->get() == ISD::SETGE;}]
>;

def COND_OLT : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETOLT || N->get() == ISD::SETLT;}]
>;

def COND_OLE : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETOLE || N->get() == ISD::SETLE;}]
>;

def COND_O : PatLeaf <(cond), [{return N->get() == ISD::SETO;}]>;
def COND_UO : PatLeaf <(cond), [{return N->get() == ISD::SETUO;}]>;

//===----------------------------------------------------------------------===//
// PatLeafs for unsigned / unordered comparisons
//===----------------------------------------------------------------------===//

def COND_UEQ : PatLeaf <(cond), [{return N->get() == ISD::SETUEQ;}]>;
def COND_UNE : PatLeaf <(cond), [{return N->get() == ISD::SETUNE;}]>;
def COND_UGT : PatLeaf <(cond), [{return N->get() == ISD::SETUGT;}]>;
def COND_UGE : PatLeaf <(cond), [{return N->get() == ISD::SETUGE;}]>;
def COND_ULT : PatLeaf <(cond), [{return N->get() == ISD::SETULT;}]>;
def COND_ULE : PatLeaf <(cond), [{return N->get() == ISD::SETULE;}]>;

// XXX - For some reason R600 version is preferring to use unordered
// for setne?
def COND_UNE_NE : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETUNE || N->get() == ISD::SETNE;}]
>;

//===----------------------------------------------------------------------===//
// PatLeafs for signed comparisons
//===----------------------------------------------------------------------===//

def COND_SGT : PatLeaf <(cond), [{return N->get() == ISD::SETGT;}]>;
def COND_SGE : PatLeaf <(cond), [{return N->get() == ISD::SETGE;}]>;
def COND_SLT : PatLeaf <(cond), [{return N->get() == ISD::SETLT;}]>;
def COND_SLE : PatLeaf <(cond), [{return N->get() == ISD::SETLE;}]>;

//===----------------------------------------------------------------------===//
// PatLeafs for integer equality
//===----------------------------------------------------------------------===//

def COND_EQ : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETEQ || N->get() == ISD::SETUEQ;}]
>;

def COND_NE : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETNE || N->get() == ISD::SETUNE;}]
>;

// Matches no condition at all; used where a pattern requires a cond
// operand that is never selected through this leaf.
def COND_NULL : PatLeaf <
  (cond),
  [{(void)N; return false;}]
>;

//===----------------------------------------------------------------------===//
// Load/Store Pattern Fragments
//===----------------------------------------------------------------------===//

class PrivateMemOp <dag ops, dag frag> : PatFrag <ops, frag, [{
  return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS;
}]>;

class PrivateLoad <SDPatternOperator op> : PrivateMemOp <
  (ops node:$ptr), (op node:$ptr)
>;

class PrivateStore <SDPatternOperator op> : PrivateMemOp <
  (ops node:$value, node:$ptr), (op node:$value, node:$ptr)
>;

def load_private : PrivateLoad <load>;

def truncstorei8_private : PrivateStore <truncstorei8>;
def truncstorei16_private : PrivateStore <truncstorei16>;
def store_private : PrivateStore <store>;

def global_store : PatFrag<(ops node:$val, node:$ptr),
    (store node:$val, node:$ptr), [{
        return isGlobalStore(dyn_cast<StoreSDNode>(N));
}]>;

// Global address space loads
def global_load : PatFrag<(ops node:$ptr), (load node:$ptr), [{
    return isGlobalLoad(dyn_cast<LoadSDNode>(N));
}]>;

// Constant address space loads
def constant_load : PatFrag<(ops node:$ptr), (load node:$ptr), [{
    return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
}]>;

// Either zero-extending or any-extending load (the hardware zero-fills,
// so both extension kinds select to the same instructions).
def az_extload : PatFrag<(ops node:$ptr), (unindexedload node:$ptr), [{
  LoadSDNode *L = cast<LoadSDNode>(N);
  return L->getExtensionType() == ISD::ZEXTLOAD ||
         L->getExtensionType() == ISD::EXTLOAD;
}]>;

def az_extloadi8 : PatFrag<(ops node:$ptr), (az_extload node:$ptr), [{
  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8;
}]>;

def az_extloadi8_global : PatFrag<(ops node:$ptr), (az_extloadi8 node:$ptr), [{
    return isGlobalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def sextloadi8_global : PatFrag<(ops node:$ptr), (sextloadi8 node:$ptr), [{
    return isGlobalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def az_extloadi8_flat : PatFrag<(ops node:$ptr), (az_extloadi8 node:$ptr), [{
    return isFlatLoad(dyn_cast<LoadSDNode>(N));
}]>;

def sextloadi8_flat : PatFrag<(ops node:$ptr), (sextloadi8 node:$ptr), [{
    return isFlatLoad(dyn_cast<LoadSDNode>(N));
}]>;

def az_extloadi8_constant : PatFrag<(ops node:$ptr), (az_extloadi8 node:$ptr), [{
    return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
}]>;

def sextloadi8_constant : PatFrag<(ops node:$ptr), (sextloadi8 node:$ptr), [{
    return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
}]>;

def az_extloadi8_local : PatFrag<(ops node:$ptr), (az_extloadi8 node:$ptr), [{
    return isLocalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def sextloadi8_local : PatFrag<(ops node:$ptr), (sextloadi8 node:$ptr), [{
    return isLocalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def extloadi8_private : PrivateLoad <az_extloadi8>;
def sextloadi8_private : PrivateLoad <sextloadi8>;

def az_extloadi16 : PatFrag<(ops node:$ptr), (az_extload node:$ptr), [{
  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;
}]>;

def az_extloadi16_global : PatFrag<(ops node:$ptr), (az_extloadi16 node:$ptr), [{
    return isGlobalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def sextloadi16_global : PatFrag<(ops node:$ptr), (sextloadi16 node:$ptr), [{
    return isGlobalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def az_extloadi16_flat : PatFrag<(ops node:$ptr), (az_extloadi16 node:$ptr), [{
    return isFlatLoad(dyn_cast<LoadSDNode>(N));
}]>;

def sextloadi16_flat : PatFrag<(ops node:$ptr), (sextloadi16 node:$ptr), [{
    return isFlatLoad(dyn_cast<LoadSDNode>(N));
}]>;

def az_extloadi16_constant : PatFrag<(ops node:$ptr), (az_extloadi16 node:$ptr), [{
    return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
}]>;

def sextloadi16_constant : PatFrag<(ops node:$ptr), (sextloadi16 node:$ptr), [{
    return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
}]>;

def az_extloadi16_local : PatFrag<(ops node:$ptr), (az_extloadi16 node:$ptr), [{
    return isLocalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def sextloadi16_local : PatFrag<(ops node:$ptr), (sextloadi16 node:$ptr), [{
    return isLocalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def extloadi16_private : PrivateLoad <az_extloadi16>;
def sextloadi16_private : PrivateLoad <sextloadi16>;

def az_extloadi32 : PatFrag<(ops node:$ptr), (az_extload node:$ptr), [{
  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32;
}]>;

def az_extloadi32_global : PatFrag<(ops node:$ptr),
                                   (az_extloadi32 node:$ptr), [{
  return isGlobalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def az_extloadi32_flat : PatFrag<(ops node:$ptr),
                                   (az_extloadi32 node:$ptr), [{
  return isFlatLoad(dyn_cast<LoadSDNode>(N));
}]>;

def az_extloadi32_constant : PatFrag<(ops node:$ptr),
                                     (az_extloadi32 node:$ptr), [{
  return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
}]>;

def truncstorei8_global : PatFrag<(ops node:$val, node:$ptr),
                                  (truncstorei8 node:$val, node:$ptr), [{
  return isGlobalStore(dyn_cast<StoreSDNode>(N));
}]>;

def truncstorei16_global : PatFrag<(ops node:$val, node:$ptr),
                                  (truncstorei16 node:$val, node:$ptr), [{
  return isGlobalStore(dyn_cast<StoreSDNode>(N));
}]>;

def truncstorei8_flat : PatFrag<(ops node:$val, node:$ptr),
                                  (truncstorei8 node:$val, node:$ptr), [{
  return isFlatStore(dyn_cast<StoreSDNode>(N));
}]>;

def truncstorei16_flat : PatFrag<(ops node:$val, node:$ptr),
                                  (truncstorei16 node:$val, node:$ptr), [{
  return isFlatStore(dyn_cast<StoreSDNode>(N));
}]>;

def local_store : PatFrag<(ops node:$val, node:$ptr),
                             (store node:$val, node:$ptr), [{
  return isLocalStore(dyn_cast<StoreSDNode>(N));
}]>;

def truncstorei8_local : PatFrag<(ops node:$val, node:$ptr),
                                  (truncstorei8 node:$val, node:$ptr), [{
  return isLocalStore(dyn_cast<StoreSDNode>(N));
}]>;

def truncstorei16_local : PatFrag<(ops node:$val, node:$ptr),
                                  (truncstorei16 node:$val, node:$ptr), [{
  return isLocalStore(dyn_cast<StoreSDNode>(N));
}]>;

def local_load : PatFrag<(ops node:$ptr), (load node:$ptr), [{
    return isLocalLoad(dyn_cast<LoadSDNode>(N));
}]>;

class Aligned8Bytes <dag ops, dag frag> : PatFrag <ops, frag, [{
  return cast<MemSDNode>(N)->getAlignment() % 8 == 0;
}]>;

def local_load_aligned8bytes : Aligned8Bytes <
  (ops node:$ptr), (local_load node:$ptr)
>;

def local_store_aligned8bytes : Aligned8Bytes <
  (ops node:$val, node:$ptr), (local_store node:$val, node:$ptr)
>;

class local_binary_atomic_op <SDNode atomic_op> :
  PatFrag<(ops node:$ptr, node:$value),
    (atomic_op node:$ptr, node:$value), [{
  return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
}]>;

def atomic_swap_local : local_binary_atomic_op<atomic_swap>;
def atomic_load_add_local : local_binary_atomic_op<atomic_load_add>;
def atomic_load_sub_local : local_binary_atomic_op<atomic_load_sub>;
def atomic_load_and_local : local_binary_atomic_op<atomic_load_and>;
def atomic_load_or_local : local_binary_atomic_op<atomic_load_or>;
def atomic_load_xor_local : local_binary_atomic_op<atomic_load_xor>;
def atomic_load_nand_local : local_binary_atomic_op<atomic_load_nand>;
def atomic_load_min_local : local_binary_atomic_op<atomic_load_min>;
def atomic_load_max_local : local_binary_atomic_op<atomic_load_max>;
def atomic_load_umin_local : local_binary_atomic_op<atomic_load_umin>;
def atomic_load_umax_local : local_binary_atomic_op<atomic_load_umax>;

def mskor_global : PatFrag<(ops node:$val, node:$ptr),
                            (AMDGPUstore_mskor node:$val, node:$ptr), [{
  return dyn_cast<MemIntrinsicSDNode>(N)->getAddressSpace() ==
         AMDGPUAS::GLOBAL_ADDRESS;
}]>;

def atomic_cmp_swap_32_local :
  PatFrag<(ops node:$ptr, node:$cmp, node:$swap),
          (atomic_cmp_swap node:$ptr, node:$cmp, node:$swap), [{
  AtomicSDNode *AN = cast<AtomicSDNode>(N);
  return AN->getMemoryVT() == MVT::i32 &&
         AN->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
}]>;

def atomic_cmp_swap_64_local :
  PatFrag<(ops node:$ptr, node:$cmp, node:$swap),
          (atomic_cmp_swap node:$ptr, node:$cmp, node:$swap), [{
  AtomicSDNode *AN = cast<AtomicSDNode>(N);
  return AN->getMemoryVT() == MVT::i64 &&
         AN->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
}]>;

def flat_load : PatFrag<(ops node:$ptr), (load node:$ptr), [{
    return isFlatLoad(dyn_cast<LoadSDNode>(N));
}]>;

def flat_store : PatFrag<(ops node:$val, node:$ptr),
                         (store node:$val, node:$ptr), [{
  return isFlatStore(dyn_cast<StoreSDNode>(N));
}]>;

def mskor_flat : PatFrag<(ops node:$val, node:$ptr),
                            (AMDGPUstore_mskor node:$val, node:$ptr), [{
  return dyn_cast<MemIntrinsicSDNode>(N)->getAddressSpace() ==
         AMDGPUAS::FLAT_ADDRESS;
}]>;

class global_binary_atomic_op <SDNode atomic_op> : PatFrag<
  (ops node:$ptr, node:$value),
  (atomic_op node:$ptr, node:$value),
  [{return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;}]
>;

def atomic_swap_global : global_binary_atomic_op<atomic_swap>;
def atomic_add_global : global_binary_atomic_op<atomic_load_add>;
def atomic_and_global : global_binary_atomic_op<atomic_load_and>;
def atomic_max_global : global_binary_atomic_op<atomic_load_max>;
def atomic_min_global : global_binary_atomic_op<atomic_load_min>;
def atomic_or_global : global_binary_atomic_op<atomic_load_or>;
def atomic_sub_global : global_binary_atomic_op<atomic_load_sub>;
def atomic_umax_global : global_binary_atomic_op<atomic_load_umax>;
def atomic_umin_global : global_binary_atomic_op<atomic_load_umin>;
def atomic_xor_global : global_binary_atomic_op<atomic_load_xor>;

//===----------------------------------------------------------------------===//
// Misc Pattern Fragments
//===----------------------------------------------------------------------===//

// Frequently used IEEE-754 single-precision bit patterns, kept as raw
// 32-bit encodings so they can be emitted as literal operands.
class Constants {
int TWO_PI = 0x40c90fdb;
int PI = 0x40490fdb;
int TWO_PI_INV = 0x3e22f983;
int FP_UINT_MAX_PLUS_1 = 0x4f800000;    // 1 << 32 in floating point encoding
int FP32_NEG_ONE = 0xbf800000;
int FP32_ONE = 0x3f800000;
}
def CONST : Constants;

def FP_ZERO : PatLeaf <
  (fpimm),
  [{return N->getValueAPF().isZero();}]
>;

def FP_ONE : PatLeaf <
  (fpimm),
  [{return N->isExactlyValue(1.0);}]
>;

def FP_HALF : PatLeaf <
  (fpimm),
  [{return N->isExactlyValue(0.5);}]
>;

let isCodeGenOnly = 1, isPseudo = 1 in {

let usesCustomInserter = 1  in {

class CLAMP <RegisterClass rc> : AMDGPUShaderInst <
  (outs rc:$dst),
  (ins rc:$src0),
  "CLAMP $dst, $src0",
  [(set f32:$dst, (AMDGPUclamp f32:$src0, (f32 FP_ZERO), (f32 FP_ONE)))]
>;

class FABS <RegisterClass rc> : AMDGPUShaderInst <
  (outs rc:$dst),
  (ins rc:$src0),
  "FABS $dst, $src0",
  [(set f32:$dst, (fabs f32:$src0))]
>;

class FNEG <RegisterClass rc> : AMDGPUShaderInst <
  (outs rc:$dst),
  (ins rc:$src0),
  "FNEG $dst, $src0",
  [(set f32:$dst, (fneg f32:$src0))]
>;

} // usesCustomInserter = 1

multiclass RegisterLoadStore <RegisterClass dstClass, Operand addrClass,
                    ComplexPattern addrPat> {
let UseNamedOperandTable = 1 in {

  def RegisterLoad : AMDGPUShaderInst <
    (outs dstClass:$dst),
    (ins addrClass:$addr, i32imm:$chan),
    "RegisterLoad $dst, $addr",
    [(set i32:$dst, (AMDGPUregister_load addrPat:$addr, (i32 timm:$chan)))]
  > {
    let isRegisterLoad = 1;
  }

  def RegisterStore : AMDGPUShaderInst <
    (outs),
    (ins dstClass:$val, addrClass:$addr, i32imm:$chan),
    "RegisterStore $val, $addr",
    [(AMDGPUregister_store i32:$val, addrPat:$addr, (i32 timm:$chan))]
  > {
    let isRegisterStore = 1;
  }
}
}

} // End isCodeGenOnly = 1, isPseudo = 1

/* Generic helper patterns for intrinsics */
/* -------------------------------------- */

class POW_Common <AMDGPUInst log_ieee, AMDGPUInst exp_ieee, AMDGPUInst mul>
  : Pat <
  (fpow f32:$src0, f32:$src1),
  (exp_ieee (mul f32:$src1, (log_ieee f32:$src0)))
>;

/* Other helper patterns */
/* --------------------- */

/* Extract element pattern */
class Extract_Element <ValueType sub_type, ValueType vec_type, int sub_idx,
                       SubRegIndex sub_reg>
  : Pat<
  (sub_type (vector_extract vec_type:$src, sub_idx)),
  (EXTRACT_SUBREG $src, sub_reg)
>;

/* Insert element pattern */
class Insert_Element <ValueType elem_type, ValueType vec_type,
                      int sub_idx, SubRegIndex sub_reg>
  : Pat <
  (vector_insert vec_type:$vec, elem_type:$elem, sub_idx),
  (INSERT_SUBREG $vec, $elem, sub_reg)
>;

// XXX: Convert to new syntax and use COPY_TO_REG, once the DFAPacketizer
// can handle COPY instructions.
// bitconvert pattern
class BitConvert <ValueType dt, ValueType st, RegisterClass rc> : Pat <
  (dt (bitconvert (st rc:$src0))),
  (dt rc:$src0)
>;

// XXX: Convert to new syntax and use COPY_TO_REG, once the DFAPacketizer
// can handle COPY instructions.
class DwordAddrPat<ValueType vt, RegisterClass rc> : Pat <
  (vt (AMDGPUdwordaddr (vt rc:$addr))),
  (vt rc:$addr)
>;

// BFI_INT patterns

multiclass BFIPatterns <Instruction BFI_INT,
                        Instruction LoadImm32,
                        RegisterClass RC64> {
  // Definition from ISA doc:
  // (y & x) | (z & ~x)
  def : Pat <
    (or (and i32:$y, i32:$x), (and i32:$z, (not i32:$x))),
    (BFI_INT $x, $y, $z)
  >;

  // SHA-256 Ch function
  // z ^ (x & (y ^ z))
  def : Pat <
    (xor i32:$z, (and i32:$x, (xor i32:$y, i32:$z))),
    (BFI_INT $x, $y, $z)
  >;

  // copysign keeps the sign bit of $src1 and everything else of $src0;
  // 0x7fffffff selects mantissa+exponent from $src0.
  def : Pat <
    (fcopysign f32:$src0, f32:$src1),
    (BFI_INT (LoadImm32 0x7fffffff), $src0, $src1)
  >;

  def : Pat <
    (f64 (fcopysign f64:$src0, f64:$src1)),
    (REG_SEQUENCE RC64,
      (i32 (EXTRACT_SUBREG $src0, sub0)), sub0,
      (BFI_INT (LoadImm32 0x7fffffff),
               (i32 (EXTRACT_SUBREG $src0, sub1)),
               (i32 (EXTRACT_SUBREG $src1, sub1))), sub1)
  >;
}

// SHA-256 Ma patterns

// ((x & z) | (y & (x | z))) -> BFI_INT (XOR x, y), z, y
class SHA256MaPattern <Instruction BFI_INT, Instruction XOR> : Pat <
  (or (and i32:$x, i32:$z), (and i32:$y, (or i32:$x, i32:$z))),
  (BFI_INT (XOR i32:$x, i32:$y), i32:$z, i32:$y)
>;

// Bitfield extract patterns

def IMMZeroBasedBitfieldMask : PatLeaf <(imm), [{
  return isMask_32(N->getZExtValue());
}]>;

def IMMPopCount : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(countPopulation(N->getZExtValue()),
                                   MVT::i32);
}]>;

class BFEPattern <Instruction BFE, Instruction MOV> : Pat <
  (i32 (and (i32 (srl i32:$src, i32:$rshift)), IMMZeroBasedBitfieldMask:$mask)),
  (BFE $src, $rshift, (MOV (i32 (IMMPopCount $mask))))
>;

// rotr pattern
class ROTRPattern <Instruction BIT_ALIGN> : Pat <
  (rotr i32:$src0, i32:$src1),
  (BIT_ALIGN $src0, $src0, $src1)
>;

// 24-bit arithmetic patterns
def umul24 : PatFrag <(ops node:$x, node:$y), (mul node:$x, node:$y)>;

// Special conversion patterns

def cvt_rpi_i32_f32 : PatFrag <
  (ops node:$src),
  (fp_to_sint (ffloor (fadd $src, FP_HALF))),
  [{ (void) N; return TM.Options.NoNaNsFPMath; }]
>;

def cvt_flr_i32_f32 : PatFrag <
  (ops node:$src),
  (fp_to_sint (ffloor $src)),
  [{ (void)N; return TM.Options.NoNaNsFPMath; }]
>;

/*
class UMUL24Pattern <Instruction UMUL24> : Pat <
  (mul U24:$x, U24:$y),
  (UMUL24 $x, $y)
>;
*/

class IMad24Pat<Instruction Inst> : Pat <
  (add (AMDGPUmul_i24 i32:$src0, i32:$src1), i32:$src2),
  (Inst $src0, $src1, $src2)
>;

class UMad24Pat<Instruction Inst> : Pat <
  (add (AMDGPUmul_u24 i32:$src0, i32:$src1), i32:$src2),
  (Inst $src0, $src1, $src2)
>;

multiclass Expand24IBitOps <Instruction MulInst, Instruction AddInst> {
  def _expand_imad24 : Pat <
    (AMDGPUmad_i24 i32:$src0, i32:$src1, i32:$src2),
    (AddInst (MulInst $src0, $src1), $src2)
  >;

  def _expand_imul24 : Pat <
    (AMDGPUmul_i24 i32:$src0, i32:$src1),
    (MulInst $src0, $src1)
  >;
}

multiclass Expand24UBitOps <Instruction MulInst, Instruction AddInst> {
  def _expand_umad24 : Pat <
    (AMDGPUmad_u24 i32:$src0, i32:$src1, i32:$src2),
    (AddInst (MulInst $src0, $src1), $src2)
  >;

  def _expand_umul24 : Pat <
    (AMDGPUmul_u24 i32:$src0, i32:$src1),
    (MulInst $src0, $src1)
  >;
}

class RcpPat<Instruction RcpInst, ValueType vt> : Pat <
  (fdiv FP_ONE, vt:$src),
  (RcpInst $src)
>;

class RsqPat<Instruction RsqInst, ValueType vt> : Pat <
  (AMDGPUrcp (fsqrt vt:$src)),
  (RsqInst $src)
>;

include "R600Instructions.td"
include "R700Instructions.td"
include "EvergreenInstructions.td"
include "CaymanInstructions.td"

include "SIInstrInfo.td"