path: root/lib/Target/Hexagon
author     Stephen Hines <srhines@google.com>  2013-03-20 04:33:54 +0000
committer  Android Git Automerger <android-git-automerger@android.com>  2013-03-20 04:33:54 +0000
commit     3dd51ae3a043f2edf9dd2bc7c906c3f602967e5a (patch)
tree       90c0395880593bf195fb818c2af1139cb7e846df /lib/Target/Hexagon
parent     84ba0bec3eb1a5f63c13a01e6d510ecd85fa6ab7 (diff)
parent     2d4629c5d7dcc6582fa7b85a517744f1a3654eba (diff)
am 2d4629c5: Merge branch 'upstream' into merge_2013_03_18
* commit '2d4629c5d7dcc6582fa7b85a517744f1a3654eba': (424 commits)
  Change NULL to 0.
  Register the flush function for each compile unit.
  Remove trailing spaces.
  Fix PPC unaligned 64-bit loads and stores
  ARM cost model: Make some vector integer to float casts cheaper
  ARM cost model: Correct cost for some cheap float to integer conversions
  Extend global merge pass to optionally consider global constant variables. Also add some checks to not merge globals used within landing pad instructions or marked as "used".
  Change test cases to handle unaligned references.
  Remove unnecessary leading comment characters in lit-only file
  Add SchedRW annotations to most of X86InstrSSE.td.
  Annotate X86 arithmetic instructions with SchedRW lists.
  Check whether a pointer is non-null (isKnownNonNull) in isKnownNonZero.
  TableGen fix for the new machine model.
  Include '.test' suffix in target specific lit configs that need it
  Make the fields in the diagram match the descriptive text above them.
  Update
  Fix 80-col. violations in PPCCTRLoops
  Fix large count and negative constant count handling in PPCCTRLoops
  Cleanup initial-value constants in PPCCTRLoops
  Fix integer comparison in DIEInteger::BestForm.
  ...
Diffstat (limited to 'lib/Target/Hexagon')
-rw-r--r--  lib/Target/Hexagon/HexagonISelLowering.cpp         |  49
-rw-r--r--  lib/Target/Hexagon/HexagonISelLowering.h           |   2
-rw-r--r--  lib/Target/Hexagon/HexagonInstrInfo.cpp            | 671
-rw-r--r--  lib/Target/Hexagon/HexagonInstrInfo.h              |  14
-rw-r--r--  lib/Target/Hexagon/HexagonInstrInfo.td             |  87
-rw-r--r--  lib/Target/Hexagon/HexagonInstrInfoV4.td           |  17
-rw-r--r--  lib/Target/Hexagon/HexagonMachineScheduler.cpp     |   6
-rw-r--r--  lib/Target/Hexagon/HexagonVLIWPacketizer.cpp       |  67
-rw-r--r--  lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h  |  22
9 files changed, 414 insertions, 521 deletions
diff --git a/lib/Target/Hexagon/HexagonISelLowering.cpp b/lib/Target/Hexagon/HexagonISelLowering.cpp
index fac931a..0a8b1af 100644
--- a/lib/Target/Hexagon/HexagonISelLowering.cpp
+++ b/lib/Target/Hexagon/HexagonISelLowering.cpp
@@ -103,6 +103,16 @@ CC_Hexagon_VarArg (unsigned ValNo, MVT ValVT,
State.addLoc(CCValAssign::getMem(ValNo, ValVT, ofst, LocVT, LocInfo));
return false;
}
+ if (LocVT == MVT::i1 || LocVT == MVT::i8 || LocVT == MVT::i16) {
+ LocVT = MVT::i32;
+ ValVT = MVT::i32;
+ if (ArgFlags.isSExt())
+ LocInfo = CCValAssign::SExt;
+ else if (ArgFlags.isZExt())
+ LocInfo = CCValAssign::ZExt;
+ else
+ LocInfo = CCValAssign::AExt;
+ }
if (LocVT == MVT::i32 || LocVT == MVT::f32) {
ofst = State.AllocateStack(4, 4);
State.addLoc(CCValAssign::getMem(ValNo, ValVT, ofst, LocVT, LocInfo));
@@ -1024,6 +1034,14 @@ SDValue HexagonTargetLowering::LowerGLOBALADDRESS(SDValue Op,
return DAG.getNode(HexagonISD::CONST32, dl, getPointerTy(), Result);
}
+SDValue
+HexagonTargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
+ const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
+ SDValue BA_SD = DAG.getTargetBlockAddress(BA, MVT::i32);
+ DebugLoc dl = Op.getDebugLoc();
+ return DAG.getNode(HexagonISD::CONST32_GP, dl, getPointerTy(), BA_SD);
+}
+
//===----------------------------------------------------------------------===//
// TargetLowering Implementation
//===----------------------------------------------------------------------===//
@@ -1297,6 +1315,7 @@ HexagonTargetLowering::HexagonTargetLowering(HexagonTargetMachine
// Custom legalize GlobalAddress nodes into CONST32.
setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
setOperationAction(ISD::GlobalAddress, MVT::i8, Custom);
+ setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
// Truncate action?
setOperationAction(ISD::TRUNCATE, MVT::i64, Expand);
@@ -1342,7 +1361,6 @@ HexagonTargetLowering::HexagonTargetLowering(HexagonTargetMachine
}
- setOperationAction(ISD::BR_CC, MVT::Other, Expand);
setOperationAction(ISD::BRIND, MVT::Other, Expand);
if (EmitJumpTables) {
setOperationAction(ISD::BR_JT, MVT::Other, Custom);
@@ -1352,6 +1370,9 @@ HexagonTargetLowering::HexagonTargetLowering(HexagonTargetMachine
// Increase jump tables cutover to 5, was 4.
setMinimumJumpTableEntries(5);
+ setOperationAction(ISD::BR_CC, MVT::f32, Expand);
+ setOperationAction(ISD::BR_CC, MVT::f64, Expand);
+ setOperationAction(ISD::BR_CC, MVT::i1, Expand);
setOperationAction(ISD::BR_CC, MVT::i32, Expand);
setOperationAction(ISD::MEMBARRIER, MVT::Other, Custom);
@@ -1365,6 +1386,29 @@ HexagonTargetLowering::HexagonTargetLowering(HexagonTargetMachine
setOperationAction(ISD::FREM , MVT::f32, Expand);
setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
+
+ // In V4, we have double word add/sub with carry. The problem with
+ // modelling this instruction is that it produces 2 results - Rdd and Px.
+ // To model update of Px, we will have to use Defs[p0..p3] which will
+ // cause any predicate live range to spill. So, we pretend we don't
+ // have these instructions.
+ setOperationAction(ISD::ADDE, MVT::i8, Expand);
+ setOperationAction(ISD::ADDE, MVT::i16, Expand);
+ setOperationAction(ISD::ADDE, MVT::i32, Expand);
+ setOperationAction(ISD::ADDE, MVT::i64, Expand);
+ setOperationAction(ISD::SUBE, MVT::i8, Expand);
+ setOperationAction(ISD::SUBE, MVT::i16, Expand);
+ setOperationAction(ISD::SUBE, MVT::i32, Expand);
+ setOperationAction(ISD::SUBE, MVT::i64, Expand);
+ setOperationAction(ISD::ADDC, MVT::i8, Expand);
+ setOperationAction(ISD::ADDC, MVT::i16, Expand);
+ setOperationAction(ISD::ADDC, MVT::i32, Expand);
+ setOperationAction(ISD::ADDC, MVT::i64, Expand);
+ setOperationAction(ISD::SUBC, MVT::i8, Expand);
+ setOperationAction(ISD::SUBC, MVT::i16, Expand);
+ setOperationAction(ISD::SUBC, MVT::i32, Expand);
+ setOperationAction(ISD::SUBC, MVT::i64, Expand);
+
setOperationAction(ISD::CTPOP, MVT::i32, Expand);
setOperationAction(ISD::CTPOP, MVT::i64, Expand);
setOperationAction(ISD::CTTZ , MVT::i32, Expand);
@@ -1436,6 +1480,8 @@ HexagonTargetLowering::getTargetNodeName(unsigned Opcode) const {
switch (Opcode) {
default: return 0;
case HexagonISD::CONST32: return "HexagonISD::CONST32";
+ case HexagonISD::CONST32_GP: return "HexagonISD::CONST32_GP";
+ case HexagonISD::CONST32_Int_Real: return "HexagonISD::CONST32_Int_Real";
case HexagonISD::ADJDYNALLOC: return "HexagonISD::ADJDYNALLOC";
case HexagonISD::CMPICC: return "HexagonISD::CMPICC";
case HexagonISD::CMPFCC: return "HexagonISD::CMPFCC";
@@ -1484,6 +1530,7 @@ HexagonTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::MEMBARRIER: return LowerMEMBARRIER(Op, DAG);
case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, DAG);
case ISD::GlobalAddress: return LowerGLOBALADDRESS(Op, DAG);
+ case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
case ISD::VASTART: return LowerVASTART(Op, DAG);
case ISD::BR_JT: return LowerBR_JT(Op, DAG);
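
For context (this sketch is not part of the patch): the new ISD::BlockAddress lowering above, together with the indirect-branch handling added later in HexagonInstrInfo.td, is what computed gotos exercise. GCC/Clang's labels-as-values extension produces blockaddress constants, and "goto *p" becomes an indirect branch. A minimal example of such a source:

    // Computed goto: &&label yields a blockaddress and "goto *p" an indirect
    // branch -- the constructs the backend learns to lower in this patch
    // (CONST32_GP of a tblockaddress plus an indirect-branch pattern).
    #include <cstdio>

    static int dispatch(int n) {
      // Labels-as-values is a GNU extension supported by GCC and Clang.
      static void *table[] = { &&op_add, &&op_done };
      int acc = 0;
      void *next = table[n & 1];
      goto *next;                // indirect branch through a block address
    op_add:
      acc += n;                  // falls through to op_done
    op_done:
      return acc;
    }

    int main() {
      printf("%d %d\n", dispatch(2), dispatch(3));   // prints "2 0"
      return 0;
    }
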
diff --git a/lib/Target/Hexagon/HexagonISelLowering.h b/lib/Target/Hexagon/HexagonISelLowering.h
index 65dab85..3279cc6 100644
--- a/lib/Target/Hexagon/HexagonISelLowering.h
+++ b/lib/Target/Hexagon/HexagonISelLowering.h
@@ -27,6 +27,7 @@ namespace llvm {
CONST32,
CONST32_GP, // For marking data present in GP.
+ CONST32_Int_Real,
FCONST32,
SETCC,
ADJDYNALLOC,
@@ -106,6 +107,7 @@ namespace llvm {
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
SDValue LowerGLOBALADDRESS(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const;
diff --git a/lib/Target/Hexagon/HexagonInstrInfo.cpp b/lib/Target/Hexagon/HexagonInstrInfo.cpp
index d30cdda..96a252e 100644
--- a/lib/Target/Hexagon/HexagonInstrInfo.cpp
+++ b/lib/Target/Hexagon/HexagonInstrInfo.cpp
@@ -557,218 +557,43 @@ unsigned HexagonInstrInfo::createVR(MachineFunction* MF, MVT VT) const {
}
bool HexagonInstrInfo::isExtendable(const MachineInstr *MI) const {
- switch(MI->getOpcode()) {
- default: return false;
- // JMP_EQri
- case Hexagon::JMP_EQriPt_nv_V4:
- case Hexagon::JMP_EQriPnt_nv_V4:
- case Hexagon::JMP_EQriNotPt_nv_V4:
- case Hexagon::JMP_EQriNotPnt_nv_V4:
-
- // JMP_EQri - with -1
- case Hexagon::JMP_EQriPtneg_nv_V4:
- case Hexagon::JMP_EQriPntneg_nv_V4:
- case Hexagon::JMP_EQriNotPtneg_nv_V4:
- case Hexagon::JMP_EQriNotPntneg_nv_V4:
-
- // JMP_EQrr
- case Hexagon::JMP_EQrrPt_nv_V4:
- case Hexagon::JMP_EQrrPnt_nv_V4:
- case Hexagon::JMP_EQrrNotPt_nv_V4:
- case Hexagon::JMP_EQrrNotPnt_nv_V4:
-
- // JMP_GTri
- case Hexagon::JMP_GTriPt_nv_V4:
- case Hexagon::JMP_GTriPnt_nv_V4:
- case Hexagon::JMP_GTriNotPt_nv_V4:
- case Hexagon::JMP_GTriNotPnt_nv_V4:
-
- // JMP_GTri - with -1
- case Hexagon::JMP_GTriPtneg_nv_V4:
- case Hexagon::JMP_GTriPntneg_nv_V4:
- case Hexagon::JMP_GTriNotPtneg_nv_V4:
- case Hexagon::JMP_GTriNotPntneg_nv_V4:
-
- // JMP_GTrr
- case Hexagon::JMP_GTrrPt_nv_V4:
- case Hexagon::JMP_GTrrPnt_nv_V4:
- case Hexagon::JMP_GTrrNotPt_nv_V4:
- case Hexagon::JMP_GTrrNotPnt_nv_V4:
-
- // JMP_GTrrdn
- case Hexagon::JMP_GTrrdnPt_nv_V4:
- case Hexagon::JMP_GTrrdnPnt_nv_V4:
- case Hexagon::JMP_GTrrdnNotPt_nv_V4:
- case Hexagon::JMP_GTrrdnNotPnt_nv_V4:
-
- // JMP_GTUri
- case Hexagon::JMP_GTUriPt_nv_V4:
- case Hexagon::JMP_GTUriPnt_nv_V4:
- case Hexagon::JMP_GTUriNotPt_nv_V4:
- case Hexagon::JMP_GTUriNotPnt_nv_V4:
-
- // JMP_GTUrr
- case Hexagon::JMP_GTUrrPt_nv_V4:
- case Hexagon::JMP_GTUrrPnt_nv_V4:
- case Hexagon::JMP_GTUrrNotPt_nv_V4:
- case Hexagon::JMP_GTUrrNotPnt_nv_V4:
+ // Constant extenders are allowed only for V4 and above.
+ if (!Subtarget.hasV4TOps())
+ return false;
- // JMP_GTUrrdn
- case Hexagon::JMP_GTUrrdnPt_nv_V4:
- case Hexagon::JMP_GTUrrdnPnt_nv_V4:
- case Hexagon::JMP_GTUrrdnNotPt_nv_V4:
- case Hexagon::JMP_GTUrrdnNotPnt_nv_V4:
+ const MCInstrDesc &MID = MI->getDesc();
+ const uint64_t F = MID.TSFlags;
+ if ((F >> HexagonII::ExtendablePos) & HexagonII::ExtendableMask)
+ return true;
- // TFR_FI
+ // TODO: This is largely obsolete now. It will need to be removed
+ // in subsequent patches.
+ switch(MI->getOpcode()) {
+ // TFR_FI remains a special case.
case Hexagon::TFR_FI:
return true;
+ default:
+ return false;
}
+ return false;
}
+// This returns true in two cases:
+// - The opcode itself indicates that this is an extended instruction.
+// - One of the MOs has been marked with the HMOTF_ConstExtended flag.
bool HexagonInstrInfo::isExtended(const MachineInstr *MI) const {
- switch(MI->getOpcode()) {
- default: return false;
- // JMP_EQri
- case Hexagon::JMP_EQriPt_ie_nv_V4:
- case Hexagon::JMP_EQriPnt_ie_nv_V4:
- case Hexagon::JMP_EQriNotPt_ie_nv_V4:
- case Hexagon::JMP_EQriNotPnt_ie_nv_V4:
-
- // JMP_EQri - with -1
- case Hexagon::JMP_EQriPtneg_ie_nv_V4:
- case Hexagon::JMP_EQriPntneg_ie_nv_V4:
- case Hexagon::JMP_EQriNotPtneg_ie_nv_V4:
- case Hexagon::JMP_EQriNotPntneg_ie_nv_V4:
-
- // JMP_EQrr
- case Hexagon::JMP_EQrrPt_ie_nv_V4:
- case Hexagon::JMP_EQrrPnt_ie_nv_V4:
- case Hexagon::JMP_EQrrNotPt_ie_nv_V4:
- case Hexagon::JMP_EQrrNotPnt_ie_nv_V4:
-
- // JMP_GTri
- case Hexagon::JMP_GTriPt_ie_nv_V4:
- case Hexagon::JMP_GTriPnt_ie_nv_V4:
- case Hexagon::JMP_GTriNotPt_ie_nv_V4:
- case Hexagon::JMP_GTriNotPnt_ie_nv_V4:
-
- // JMP_GTri - with -1
- case Hexagon::JMP_GTriPtneg_ie_nv_V4:
- case Hexagon::JMP_GTriPntneg_ie_nv_V4:
- case Hexagon::JMP_GTriNotPtneg_ie_nv_V4:
- case Hexagon::JMP_GTriNotPntneg_ie_nv_V4:
-
- // JMP_GTrr
- case Hexagon::JMP_GTrrPt_ie_nv_V4:
- case Hexagon::JMP_GTrrPnt_ie_nv_V4:
- case Hexagon::JMP_GTrrNotPt_ie_nv_V4:
- case Hexagon::JMP_GTrrNotPnt_ie_nv_V4:
-
- // JMP_GTrrdn
- case Hexagon::JMP_GTrrdnPt_ie_nv_V4:
- case Hexagon::JMP_GTrrdnPnt_ie_nv_V4:
- case Hexagon::JMP_GTrrdnNotPt_ie_nv_V4:
- case Hexagon::JMP_GTrrdnNotPnt_ie_nv_V4:
-
- // JMP_GTUri
- case Hexagon::JMP_GTUriPt_ie_nv_V4:
- case Hexagon::JMP_GTUriPnt_ie_nv_V4:
- case Hexagon::JMP_GTUriNotPt_ie_nv_V4:
- case Hexagon::JMP_GTUriNotPnt_ie_nv_V4:
-
- // JMP_GTUrr
- case Hexagon::JMP_GTUrrPt_ie_nv_V4:
- case Hexagon::JMP_GTUrrPnt_ie_nv_V4:
- case Hexagon::JMP_GTUrrNotPt_ie_nv_V4:
- case Hexagon::JMP_GTUrrNotPnt_ie_nv_V4:
-
- // JMP_GTUrrdn
- case Hexagon::JMP_GTUrrdnPt_ie_nv_V4:
- case Hexagon::JMP_GTUrrdnPnt_ie_nv_V4:
- case Hexagon::JMP_GTUrrdnNotPt_ie_nv_V4:
- case Hexagon::JMP_GTUrrdnNotPnt_ie_nv_V4:
-
- // V4 absolute set addressing.
- case Hexagon::LDrid_abs_setimm_V4:
- case Hexagon::LDriw_abs_setimm_V4:
- case Hexagon::LDrih_abs_setimm_V4:
- case Hexagon::LDrib_abs_setimm_V4:
- case Hexagon::LDriuh_abs_setimm_V4:
- case Hexagon::LDriub_abs_setimm_V4:
-
- case Hexagon::STrid_abs_setimm_V4:
- case Hexagon::STrib_abs_setimm_V4:
- case Hexagon::STrih_abs_setimm_V4:
- case Hexagon::STriw_abs_setimm_V4:
-
- // V4 global address load.
- case Hexagon::LDd_GP_cPt_V4 :
- case Hexagon::LDd_GP_cNotPt_V4 :
- case Hexagon::LDd_GP_cdnPt_V4 :
- case Hexagon::LDd_GP_cdnNotPt_V4 :
- case Hexagon::LDb_GP_cPt_V4 :
- case Hexagon::LDb_GP_cNotPt_V4 :
- case Hexagon::LDb_GP_cdnPt_V4 :
- case Hexagon::LDb_GP_cdnNotPt_V4 :
- case Hexagon::LDub_GP_cPt_V4 :
- case Hexagon::LDub_GP_cNotPt_V4 :
- case Hexagon::LDub_GP_cdnPt_V4 :
- case Hexagon::LDub_GP_cdnNotPt_V4 :
- case Hexagon::LDh_GP_cPt_V4 :
- case Hexagon::LDh_GP_cNotPt_V4 :
- case Hexagon::LDh_GP_cdnPt_V4 :
- case Hexagon::LDh_GP_cdnNotPt_V4 :
- case Hexagon::LDuh_GP_cPt_V4 :
- case Hexagon::LDuh_GP_cNotPt_V4 :
- case Hexagon::LDuh_GP_cdnPt_V4 :
- case Hexagon::LDuh_GP_cdnNotPt_V4 :
- case Hexagon::LDw_GP_cPt_V4 :
- case Hexagon::LDw_GP_cNotPt_V4 :
- case Hexagon::LDw_GP_cdnPt_V4 :
- case Hexagon::LDw_GP_cdnNotPt_V4 :
-
- // V4 global address store.
- case Hexagon::STd_GP_cPt_V4 :
- case Hexagon::STd_GP_cNotPt_V4 :
- case Hexagon::STd_GP_cdnPt_V4 :
- case Hexagon::STd_GP_cdnNotPt_V4 :
- case Hexagon::STb_GP_cPt_V4 :
- case Hexagon::STb_GP_cNotPt_V4 :
- case Hexagon::STb_GP_cdnPt_V4 :
- case Hexagon::STb_GP_cdnNotPt_V4 :
- case Hexagon::STh_GP_cPt_V4 :
- case Hexagon::STh_GP_cNotPt_V4 :
- case Hexagon::STh_GP_cdnPt_V4 :
- case Hexagon::STh_GP_cdnNotPt_V4 :
- case Hexagon::STw_GP_cPt_V4 :
- case Hexagon::STw_GP_cNotPt_V4 :
- case Hexagon::STw_GP_cdnPt_V4 :
- case Hexagon::STw_GP_cdnNotPt_V4 :
-
- // V4 predicated global address new value store.
- case Hexagon::STb_GP_cPt_nv_V4 :
- case Hexagon::STb_GP_cNotPt_nv_V4 :
- case Hexagon::STb_GP_cdnPt_nv_V4 :
- case Hexagon::STb_GP_cdnNotPt_nv_V4 :
- case Hexagon::STh_GP_cPt_nv_V4 :
- case Hexagon::STh_GP_cNotPt_nv_V4 :
- case Hexagon::STh_GP_cdnPt_nv_V4 :
- case Hexagon::STh_GP_cdnNotPt_nv_V4 :
- case Hexagon::STw_GP_cPt_nv_V4 :
- case Hexagon::STw_GP_cNotPt_nv_V4 :
- case Hexagon::STw_GP_cdnPt_nv_V4 :
- case Hexagon::STw_GP_cdnNotPt_nv_V4 :
-
- // TFR_FI
- case Hexagon::TFR_FI_immext_V4:
-
- // TFRI_F
- case Hexagon::TFRI_f:
- case Hexagon::TFRI_cPt_f:
- case Hexagon::TFRI_cNotPt_f:
- case Hexagon::CONST64_Float_Real:
+ // First check if this is a permanently extended opcode.
+ const uint64_t F = MI->getDesc().TSFlags;
+ if ((F >> HexagonII::ExtendedPos) & HexagonII::ExtendedMask)
+ return true;
+ // Use MO operand flags to determine if one of MI's operands
+ // has the HMOTF_ConstExtended flag set.
+ for (MachineInstr::const_mop_iterator I = MI->operands_begin(),
+ E = MI->operands_end(); I != E; ++I) {
+ if (I->getTargetFlags() & HexagonII::HMOTF_ConstExtended)
return true;
}
+ return false;
}
bool HexagonInstrInfo::isNewValueJump(const MachineInstr *MI) const {
@@ -877,258 +702,6 @@ bool HexagonInstrInfo::isNewValueJump(const MachineInstr *MI) const {
}
}
-unsigned HexagonInstrInfo::getImmExtForm(const MachineInstr* MI) const {
- switch(MI->getOpcode()) {
- default: llvm_unreachable("Unknown type of instruction.");
- // JMP_EQri
- case Hexagon::JMP_EQriPt_nv_V4:
- return Hexagon::JMP_EQriPt_ie_nv_V4;
- case Hexagon::JMP_EQriNotPt_nv_V4:
- return Hexagon::JMP_EQriNotPt_ie_nv_V4;
- case Hexagon::JMP_EQriPnt_nv_V4:
- return Hexagon::JMP_EQriPnt_ie_nv_V4;
- case Hexagon::JMP_EQriNotPnt_nv_V4:
- return Hexagon::JMP_EQriNotPnt_ie_nv_V4;
-
- // JMP_EQri -- with -1
- case Hexagon::JMP_EQriPtneg_nv_V4:
- return Hexagon::JMP_EQriPtneg_ie_nv_V4;
- case Hexagon::JMP_EQriNotPtneg_nv_V4:
- return Hexagon::JMP_EQriNotPtneg_ie_nv_V4;
- case Hexagon::JMP_EQriPntneg_nv_V4:
- return Hexagon::JMP_EQriPntneg_ie_nv_V4;
- case Hexagon::JMP_EQriNotPntneg_nv_V4:
- return Hexagon::JMP_EQriNotPntneg_ie_nv_V4;
-
- // JMP_EQrr
- case Hexagon::JMP_EQrrPt_nv_V4:
- return Hexagon::JMP_EQrrPt_ie_nv_V4;
- case Hexagon::JMP_EQrrNotPt_nv_V4:
- return Hexagon::JMP_EQrrNotPt_ie_nv_V4;
- case Hexagon::JMP_EQrrPnt_nv_V4:
- return Hexagon::JMP_EQrrPnt_ie_nv_V4;
- case Hexagon::JMP_EQrrNotPnt_nv_V4:
- return Hexagon::JMP_EQrrNotPnt_ie_nv_V4;
-
- // JMP_GTri
- case Hexagon::JMP_GTriPt_nv_V4:
- return Hexagon::JMP_GTriPt_ie_nv_V4;
- case Hexagon::JMP_GTriNotPt_nv_V4:
- return Hexagon::JMP_GTriNotPt_ie_nv_V4;
- case Hexagon::JMP_GTriPnt_nv_V4:
- return Hexagon::JMP_GTriPnt_ie_nv_V4;
- case Hexagon::JMP_GTriNotPnt_nv_V4:
- return Hexagon::JMP_GTriNotPnt_ie_nv_V4;
-
- // JMP_GTri -- with -1
- case Hexagon::JMP_GTriPtneg_nv_V4:
- return Hexagon::JMP_GTriPtneg_ie_nv_V4;
- case Hexagon::JMP_GTriNotPtneg_nv_V4:
- return Hexagon::JMP_GTriNotPtneg_ie_nv_V4;
- case Hexagon::JMP_GTriPntneg_nv_V4:
- return Hexagon::JMP_GTriPntneg_ie_nv_V4;
- case Hexagon::JMP_GTriNotPntneg_nv_V4:
- return Hexagon::JMP_GTriNotPntneg_ie_nv_V4;
-
- // JMP_GTrr
- case Hexagon::JMP_GTrrPt_nv_V4:
- return Hexagon::JMP_GTrrPt_ie_nv_V4;
- case Hexagon::JMP_GTrrNotPt_nv_V4:
- return Hexagon::JMP_GTrrNotPt_ie_nv_V4;
- case Hexagon::JMP_GTrrPnt_nv_V4:
- return Hexagon::JMP_GTrrPnt_ie_nv_V4;
- case Hexagon::JMP_GTrrNotPnt_nv_V4:
- return Hexagon::JMP_GTrrNotPnt_ie_nv_V4;
-
- // JMP_GTrrdn
- case Hexagon::JMP_GTrrdnPt_nv_V4:
- return Hexagon::JMP_GTrrdnPt_ie_nv_V4;
- case Hexagon::JMP_GTrrdnNotPt_nv_V4:
- return Hexagon::JMP_GTrrdnNotPt_ie_nv_V4;
- case Hexagon::JMP_GTrrdnPnt_nv_V4:
- return Hexagon::JMP_GTrrdnPnt_ie_nv_V4;
- case Hexagon::JMP_GTrrdnNotPnt_nv_V4:
- return Hexagon::JMP_GTrrdnNotPnt_ie_nv_V4;
-
- // JMP_GTUri
- case Hexagon::JMP_GTUriPt_nv_V4:
- return Hexagon::JMP_GTUriPt_ie_nv_V4;
- case Hexagon::JMP_GTUriNotPt_nv_V4:
- return Hexagon::JMP_GTUriNotPt_ie_nv_V4;
- case Hexagon::JMP_GTUriPnt_nv_V4:
- return Hexagon::JMP_GTUriPnt_ie_nv_V4;
- case Hexagon::JMP_GTUriNotPnt_nv_V4:
- return Hexagon::JMP_GTUriNotPnt_ie_nv_V4;
-
- // JMP_GTUrr
- case Hexagon::JMP_GTUrrPt_nv_V4:
- return Hexagon::JMP_GTUrrPt_ie_nv_V4;
- case Hexagon::JMP_GTUrrNotPt_nv_V4:
- return Hexagon::JMP_GTUrrNotPt_ie_nv_V4;
- case Hexagon::JMP_GTUrrPnt_nv_V4:
- return Hexagon::JMP_GTUrrPnt_ie_nv_V4;
- case Hexagon::JMP_GTUrrNotPnt_nv_V4:
- return Hexagon::JMP_GTUrrNotPnt_ie_nv_V4;
-
- // JMP_GTUrrdn
- case Hexagon::JMP_GTUrrdnPt_nv_V4:
- return Hexagon::JMP_GTUrrdnPt_ie_nv_V4;
- case Hexagon::JMP_GTUrrdnNotPt_nv_V4:
- return Hexagon::JMP_GTUrrdnNotPt_ie_nv_V4;
- case Hexagon::JMP_GTUrrdnPnt_nv_V4:
- return Hexagon::JMP_GTUrrdnPnt_ie_nv_V4;
- case Hexagon::JMP_GTUrrdnNotPnt_nv_V4:
- return Hexagon::JMP_GTUrrdnNotPnt_ie_nv_V4;
-
- case Hexagon::TFR_FI:
- return Hexagon::TFR_FI_immext_V4;
-
- case Hexagon::MEMw_ADDi_indexed_MEM_V4 :
- case Hexagon::MEMw_SUBi_indexed_MEM_V4 :
- case Hexagon::MEMw_ADDr_indexed_MEM_V4 :
- case Hexagon::MEMw_SUBr_indexed_MEM_V4 :
- case Hexagon::MEMw_ANDr_indexed_MEM_V4 :
- case Hexagon::MEMw_ORr_indexed_MEM_V4 :
- case Hexagon::MEMw_ADDi_MEM_V4 :
- case Hexagon::MEMw_SUBi_MEM_V4 :
- case Hexagon::MEMw_ADDr_MEM_V4 :
- case Hexagon::MEMw_SUBr_MEM_V4 :
- case Hexagon::MEMw_ANDr_MEM_V4 :
- case Hexagon::MEMw_ORr_MEM_V4 :
- case Hexagon::MEMh_ADDi_indexed_MEM_V4 :
- case Hexagon::MEMh_SUBi_indexed_MEM_V4 :
- case Hexagon::MEMh_ADDr_indexed_MEM_V4 :
- case Hexagon::MEMh_SUBr_indexed_MEM_V4 :
- case Hexagon::MEMh_ANDr_indexed_MEM_V4 :
- case Hexagon::MEMh_ORr_indexed_MEM_V4 :
- case Hexagon::MEMh_ADDi_MEM_V4 :
- case Hexagon::MEMh_SUBi_MEM_V4 :
- case Hexagon::MEMh_ADDr_MEM_V4 :
- case Hexagon::MEMh_SUBr_MEM_V4 :
- case Hexagon::MEMh_ANDr_MEM_V4 :
- case Hexagon::MEMh_ORr_MEM_V4 :
- case Hexagon::MEMb_ADDi_indexed_MEM_V4 :
- case Hexagon::MEMb_SUBi_indexed_MEM_V4 :
- case Hexagon::MEMb_ADDr_indexed_MEM_V4 :
- case Hexagon::MEMb_SUBr_indexed_MEM_V4 :
- case Hexagon::MEMb_ANDr_indexed_MEM_V4 :
- case Hexagon::MEMb_ORr_indexed_MEM_V4 :
- case Hexagon::MEMb_ADDi_MEM_V4 :
- case Hexagon::MEMb_SUBi_MEM_V4 :
- case Hexagon::MEMb_ADDr_MEM_V4 :
- case Hexagon::MEMb_SUBr_MEM_V4 :
- case Hexagon::MEMb_ANDr_MEM_V4 :
- case Hexagon::MEMb_ORr_MEM_V4 :
- llvm_unreachable("Needs implementing.");
- }
-}
-
-unsigned HexagonInstrInfo::getNormalBranchForm(const MachineInstr* MI) const {
- switch(MI->getOpcode()) {
- default: llvm_unreachable("Unknown type of jump instruction.");
- // JMP_EQri
- case Hexagon::JMP_EQriPt_ie_nv_V4:
- return Hexagon::JMP_EQriPt_nv_V4;
- case Hexagon::JMP_EQriNotPt_ie_nv_V4:
- return Hexagon::JMP_EQriNotPt_nv_V4;
- case Hexagon::JMP_EQriPnt_ie_nv_V4:
- return Hexagon::JMP_EQriPnt_nv_V4;
- case Hexagon::JMP_EQriNotPnt_ie_nv_V4:
- return Hexagon::JMP_EQriNotPnt_nv_V4;
-
- // JMP_EQri -- with -1
- case Hexagon::JMP_EQriPtneg_ie_nv_V4:
- return Hexagon::JMP_EQriPtneg_nv_V4;
- case Hexagon::JMP_EQriNotPtneg_ie_nv_V4:
- return Hexagon::JMP_EQriNotPtneg_nv_V4;
- case Hexagon::JMP_EQriPntneg_ie_nv_V4:
- return Hexagon::JMP_EQriPntneg_nv_V4;
- case Hexagon::JMP_EQriNotPntneg_ie_nv_V4:
- return Hexagon::JMP_EQriNotPntneg_nv_V4;
-
- // JMP_EQrr
- case Hexagon::JMP_EQrrPt_ie_nv_V4:
- return Hexagon::JMP_EQrrPt_nv_V4;
- case Hexagon::JMP_EQrrNotPt_ie_nv_V4:
- return Hexagon::JMP_EQrrNotPt_nv_V4;
- case Hexagon::JMP_EQrrPnt_ie_nv_V4:
- return Hexagon::JMP_EQrrPnt_nv_V4;
- case Hexagon::JMP_EQrrNotPnt_ie_nv_V4:
- return Hexagon::JMP_EQrrNotPnt_nv_V4;
-
- // JMP_GTri
- case Hexagon::JMP_GTriPt_ie_nv_V4:
- return Hexagon::JMP_GTriPt_nv_V4;
- case Hexagon::JMP_GTriNotPt_ie_nv_V4:
- return Hexagon::JMP_GTriNotPt_nv_V4;
- case Hexagon::JMP_GTriPnt_ie_nv_V4:
- return Hexagon::JMP_GTriPnt_nv_V4;
- case Hexagon::JMP_GTriNotPnt_ie_nv_V4:
- return Hexagon::JMP_GTriNotPnt_nv_V4;
-
- // JMP_GTri -- with -1
- case Hexagon::JMP_GTriPtneg_ie_nv_V4:
- return Hexagon::JMP_GTriPtneg_nv_V4;
- case Hexagon::JMP_GTriNotPtneg_ie_nv_V4:
- return Hexagon::JMP_GTriNotPtneg_nv_V4;
- case Hexagon::JMP_GTriPntneg_ie_nv_V4:
- return Hexagon::JMP_GTriPntneg_nv_V4;
- case Hexagon::JMP_GTriNotPntneg_ie_nv_V4:
- return Hexagon::JMP_GTriNotPntneg_nv_V4;
-
- // JMP_GTrr
- case Hexagon::JMP_GTrrPt_ie_nv_V4:
- return Hexagon::JMP_GTrrPt_nv_V4;
- case Hexagon::JMP_GTrrNotPt_ie_nv_V4:
- return Hexagon::JMP_GTrrNotPt_nv_V4;
- case Hexagon::JMP_GTrrPnt_ie_nv_V4:
- return Hexagon::JMP_GTrrPnt_nv_V4;
- case Hexagon::JMP_GTrrNotPnt_ie_nv_V4:
- return Hexagon::JMP_GTrrNotPnt_nv_V4;
-
- // JMP_GTrrdn
- case Hexagon::JMP_GTrrdnPt_ie_nv_V4:
- return Hexagon::JMP_GTrrdnPt_nv_V4;
- case Hexagon::JMP_GTrrdnNotPt_ie_nv_V4:
- return Hexagon::JMP_GTrrdnNotPt_nv_V4;
- case Hexagon::JMP_GTrrdnPnt_ie_nv_V4:
- return Hexagon::JMP_GTrrdnPnt_nv_V4;
- case Hexagon::JMP_GTrrdnNotPnt_ie_nv_V4:
- return Hexagon::JMP_GTrrdnNotPnt_nv_V4;
-
- // JMP_GTUri
- case Hexagon::JMP_GTUriPt_ie_nv_V4:
- return Hexagon::JMP_GTUriPt_nv_V4;
- case Hexagon::JMP_GTUriNotPt_ie_nv_V4:
- return Hexagon::JMP_GTUriNotPt_nv_V4;
- case Hexagon::JMP_GTUriPnt_ie_nv_V4:
- return Hexagon::JMP_GTUriPnt_nv_V4;
- case Hexagon::JMP_GTUriNotPnt_ie_nv_V4:
- return Hexagon::JMP_GTUriNotPnt_nv_V4;
-
- // JMP_GTUrr
- case Hexagon::JMP_GTUrrPt_ie_nv_V4:
- return Hexagon::JMP_GTUrrPt_nv_V4;
- case Hexagon::JMP_GTUrrNotPt_ie_nv_V4:
- return Hexagon::JMP_GTUrrNotPt_nv_V4;
- case Hexagon::JMP_GTUrrPnt_ie_nv_V4:
- return Hexagon::JMP_GTUrrPnt_nv_V4;
- case Hexagon::JMP_GTUrrNotPnt_ie_nv_V4:
- return Hexagon::JMP_GTUrrNotPnt_nv_V4;
-
- // JMP_GTUrrdn
- case Hexagon::JMP_GTUrrdnPt_ie_nv_V4:
- return Hexagon::JMP_GTUrrdnPt_nv_V4;
- case Hexagon::JMP_GTUrrdnNotPt_ie_nv_V4:
- return Hexagon::JMP_GTUrrdnNotPt_nv_V4;
- case Hexagon::JMP_GTUrrdnPnt_ie_nv_V4:
- return Hexagon::JMP_GTUrrdnPnt_nv_V4;
- case Hexagon::JMP_GTUrrdnNotPnt_ie_nv_V4:
- return Hexagon::JMP_GTUrrdnNotPnt_nv_V4;
- }
-}
-
-
bool HexagonInstrInfo::isNewValueStore(const MachineInstr *MI) const {
switch (MI->getOpcode()) {
default: return false;
@@ -1326,6 +899,16 @@ bool HexagonInstrInfo::isPostIncrement (const MachineInstr* MI) const {
}
}
+bool HexagonInstrInfo::isNewValueInst(const MachineInstr *MI) const {
+ if (isNewValueJump(MI))
+ return true;
+
+ if (isNewValueStore(MI))
+ return true;
+
+ return false;
+}
+
bool HexagonInstrInfo::isSaveCalleeSavedRegsCall(const MachineInstr *MI) const {
return MI->getOpcode() == Hexagon::SAVE_REGISTERS_CALL_V4;
}
@@ -2366,6 +1949,10 @@ isValidOffset(const int Opcode, const int Offset) const {
// the given "Opcode". If "Offset" is not in the correct range, "ADD_ri" is
// inserted to calculate the final address. Due to this reason, the function
// assumes that the "Offset" has correct alignment.
+ // We used to assert if the offset was not properly aligned; however,
+ // there are cases where a misaligned pointer cast can cause this
+ // problem, and we need to allow for it. The front end warns about such
+ // misalignment with respect to the load size.
switch(Opcode) {
@@ -2375,7 +1962,6 @@ isValidOffset(const int Opcode, const int Offset) const {
case Hexagon::STriw_indexed:
case Hexagon::STriw:
case Hexagon::STriw_f:
- assert((Offset % 4 == 0) && "Offset has incorrect alignment");
return (Offset >= Hexagon_MEMW_OFFSET_MIN) &&
(Offset <= Hexagon_MEMW_OFFSET_MAX);
@@ -2385,14 +1971,12 @@ isValidOffset(const int Opcode, const int Offset) const {
case Hexagon::STrid:
case Hexagon::STrid_indexed:
case Hexagon::STrid_f:
- assert((Offset % 8 == 0) && "Offset has incorrect alignment");
return (Offset >= Hexagon_MEMD_OFFSET_MIN) &&
(Offset <= Hexagon_MEMD_OFFSET_MAX);
case Hexagon::LDrih:
case Hexagon::LDriuh:
case Hexagon::STrih:
- assert((Offset % 2 == 0) && "Offset has incorrect alignment");
return (Offset >= Hexagon_MEMH_OFFSET_MIN) &&
(Offset <= Hexagon_MEMH_OFFSET_MAX);
@@ -2419,7 +2003,6 @@ isValidOffset(const int Opcode, const int Offset) const {
case Hexagon::MEMw_SUBr_MEM_V4 :
case Hexagon::MEMw_ANDr_MEM_V4 :
case Hexagon::MEMw_ORr_MEM_V4 :
- assert ((Offset % 4) == 0 && "MEMOPw offset is not aligned correctly." );
return (0 <= Offset && Offset <= 255);
case Hexagon::MEMh_ADDi_indexed_MEM_V4 :
@@ -2434,7 +2017,6 @@ isValidOffset(const int Opcode, const int Offset) const {
case Hexagon::MEMh_SUBr_MEM_V4 :
case Hexagon::MEMh_ANDr_MEM_V4 :
case Hexagon::MEMh_ORr_MEM_V4 :
- assert ((Offset % 2) == 0 && "MEMOPh offset is not aligned correctly." );
return (0 <= Offset && Offset <= 127);
case Hexagon::MEMb_ADDi_indexed_MEM_V4 :
@@ -2800,7 +2382,26 @@ isConditionalStore (const MachineInstr* MI) const {
}
}
+unsigned HexagonInstrInfo::getAddrMode(const MachineInstr* MI) const {
+ const uint64_t F = MI->getDesc().TSFlags;
+
+ return((F >> HexagonII::AddrModePos) & HexagonII::AddrModeMask);
+}
+/// immediateExtend - Changes the instruction in place to one using an immediate
+/// extender.
+void HexagonInstrInfo::immediateExtend(MachineInstr *MI) const {
+ assert((isExtendable(MI)||isConstExtended(MI)) &&
+ "Instruction must be extendable");
+ // Find which operand is extendable.
+ short ExtOpNum = getCExtOpNum(MI);
+ MachineOperand &MO = MI->getOperand(ExtOpNum);
+ // This needs to be something we understand.
+ assert((MO.isMBB() || MO.isImm()) &&
+ "Branch with unknown extendable field type");
+ // Mark given operand as extended.
+ MO.addTargetFlag(HexagonII::HMOTF_ConstExtended);
+}
DFAPacketizer *HexagonInstrInfo::
CreateTargetScheduleState(const TargetMachine *TM,
@@ -2827,3 +2428,155 @@ bool HexagonInstrInfo::isSchedulingBoundary(const MachineInstr *MI,
return false;
}
+
+bool HexagonInstrInfo::isConstExtended(MachineInstr *MI) const {
+
+ // Constant extenders are allowed only for V4 and above.
+ if (!Subtarget.hasV4TOps())
+ return false;
+
+ const uint64_t F = MI->getDesc().TSFlags;
+ unsigned isExtended = (F >> HexagonII::ExtendedPos) & HexagonII::ExtendedMask;
+ if (isExtended) // Instruction must be extended.
+ return true;
+
+ unsigned isExtendable = (F >> HexagonII::ExtendablePos)
+ & HexagonII::ExtendableMask;
+ if (!isExtendable)
+ return false;
+
+ short ExtOpNum = getCExtOpNum(MI);
+ const MachineOperand &MO = MI->getOperand(ExtOpNum);
+ // Use MO operand flags to determine if MO
+ // has the HMOTF_ConstExtended flag set.
+ if (MO.getTargetFlags() & HexagonII::HMOTF_ConstExtended)
+ return true;
+ // If this is a Machine BB address we are talking about, and it is
+ // not marked as extended, say so.
+ if (MO.isMBB())
+ return false;
+
+ // We could be using an instruction with an extendable immediate and shoehorn
+ // a global address into it. If it is a global address it will be constant
+ // extended. We do this for COMBINE.
+ // We currently only handle isGlobal() and isSymbol(), since those are the
+ // only kinds of objects we expect to end up with here for now.
+ // In the future we may need to handle other operand kinds as well.
+ if (MO.isGlobal() || MO.isSymbol())
+ return true;
+
+ // If the extendable operand is not 'Immediate' type, the instruction should
+ // have 'isExtended' flag set.
+ assert(MO.isImm() && "Extendable operand must be Immediate type");
+
+ int MinValue = getMinValue(MI);
+ int MaxValue = getMaxValue(MI);
+ int ImmValue = MO.getImm();
+
+ return (ImmValue < MinValue || ImmValue > MaxValue);
+}
+
+// Returns true if a particular operand is extendable for an instruction.
+bool HexagonInstrInfo::isOperandExtended(const MachineInstr *MI,
+ unsigned short OperandNum) const {
+ // Constant extenders are allowed only for V4 and above.
+ if (!Subtarget.hasV4TOps())
+ return false;
+
+ const uint64_t F = MI->getDesc().TSFlags;
+
+ return ((F >> HexagonII::ExtendableOpPos) & HexagonII::ExtendableOpMask)
+ == OperandNum;
+}
+
+// Returns Operand Index for the constant extended instruction.
+unsigned short HexagonInstrInfo::getCExtOpNum(const MachineInstr *MI) const {
+ const uint64_t F = MI->getDesc().TSFlags;
+ return ((F >> HexagonII::ExtendableOpPos) & HexagonII::ExtendableOpMask);
+}
+
+// Returns the min value that doesn't need to be extended.
+int HexagonInstrInfo::getMinValue(const MachineInstr *MI) const {
+ const uint64_t F = MI->getDesc().TSFlags;
+ unsigned isSigned = (F >> HexagonII::ExtentSignedPos)
+ & HexagonII::ExtentSignedMask;
+ unsigned bits = (F >> HexagonII::ExtentBitsPos)
+ & HexagonII::ExtentBitsMask;
+
+ if (isSigned) // if value is signed
+ return -1 << (bits - 1);
+ else
+ return 0;
+}
+
+// Returns the max value that doesn't need to be extended.
+int HexagonInstrInfo::getMaxValue(const MachineInstr *MI) const {
+ const uint64_t F = MI->getDesc().TSFlags;
+ unsigned isSigned = (F >> HexagonII::ExtentSignedPos)
+ & HexagonII::ExtentSignedMask;
+ unsigned bits = (F >> HexagonII::ExtentBitsPos)
+ & HexagonII::ExtentBitsMask;
+
+ if (isSigned) // if value is signed
+ return ~(-1 << (bits - 1));
+ else
+ return ~(-1 << bits);
+}
+
+// Returns true if an instruction can be converted into a non-extended
+// equivalent instruction.
+bool HexagonInstrInfo::NonExtEquivalentExists (const MachineInstr *MI) const {
+
+ short NonExtOpcode;
+ // Check if the instruction has a register form that uses register in place
+ // of the extended operand, if so return that as the non-extended form.
+ if (Hexagon::getRegForm(MI->getOpcode()) >= 0)
+ return true;
+
+ if (MI->getDesc().mayLoad() || MI->getDesc().mayStore()) {
+ // Check addressing mode and retrieve non-ext equivalent instruction.
+
+ switch (getAddrMode(MI)) {
+ case HexagonII::Absolute :
+ // Load/store with absolute addressing mode can be converted into
+ // base+offset mode.
+ NonExtOpcode = Hexagon::getBasedWithImmOffset(MI->getOpcode());
+ break;
+ case HexagonII::BaseImmOffset :
+ // Load/store with base+offset addressing mode can be converted into
+ // base+register offset addressing mode. However, the left-shift operand
+ // should be set to 0.
+ NonExtOpcode = Hexagon::getBaseWithRegOffset(MI->getOpcode());
+ break;
+ default:
+ return false;
+ }
+ if (NonExtOpcode < 0)
+ return false;
+ return true;
+ }
+ return false;
+}
+
+// Returns opcode of the non-extended equivalent instruction.
+short HexagonInstrInfo::getNonExtOpcode (const MachineInstr *MI) const {
+
+ // Check if the instruction has a register form that uses register in place
+ // of the extended operand, if so return that as the non-extended form.
+ short NonExtOpcode = Hexagon::getRegForm(MI->getOpcode());
+ if (NonExtOpcode >= 0)
+ return NonExtOpcode;
+
+ if (MI->getDesc().mayLoad() || MI->getDesc().mayStore()) {
+ // Check addressing mode and retrieve non-ext equivalent instruction.
+ switch (getAddrMode(MI)) {
+ case HexagonII::Absolute :
+ return Hexagon::getBasedWithImmOffset(MI->getOpcode());
+ case HexagonII::BaseImmOffset :
+ return Hexagon::getBaseWithRegOffset(MI->getOpcode());
+ default:
+ return -1;
+ }
+ }
+ return -1;
+}
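
The helpers added above (isExtendable, isExtended, isConstExtended, getCExtOpNum, getMinValue, getMaxValue) all read bit-fields that TableGen packs into each instruction's 64-bit TSFlags word. A self-contained sketch of that shift-and-mask decoding and of the range test isConstExtended applies to immediates; the field positions below are invented for the example and do not match HexagonInstrFormat.td:

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    // Illustrative field layout (NOT the real HexagonII positions): a flags
    // word carrying "is extendable", "extent is signed" and "extent width".
    enum : unsigned {
      ExtendablePos   = 0,  ExtendableMask   = 0x1,
      ExtentSignedPos = 1,  ExtentSignedMask = 0x1,
      ExtentBitsPos   = 2,  ExtentBitsMask   = 0x3f,
    };

    // Same shape as HexagonInstrInfo::getMinValue/getMaxValue: the range an
    // immediate may take without needing a constant extender.
    static int minValue(uint64_t F) {
      unsigned Signed = (F >> ExtentSignedPos) & ExtentSignedMask;
      unsigned Bits   = (F >> ExtentBitsPos) & ExtentBitsMask;
      return Signed ? -(1 << (Bits - 1)) : 0;
    }
    static int maxValue(uint64_t F) {
      unsigned Signed = (F >> ExtentSignedPos) & ExtentSignedMask;
      unsigned Bits   = (F >> ExtentBitsPos) & ExtentBitsMask;
      return Signed ? (1 << (Bits - 1)) - 1 : (1 << Bits) - 1;
    }

    int main() {
      // An extendable instruction with a signed 8-bit extent.
      uint64_t F = (1u << ExtendablePos) | (1u << ExtentSignedPos) |
                   (8u << ExtentBitsPos);
      assert(((F >> ExtendablePos) & ExtendableMask) && "isExtendable-style test");
      // Immediates outside this range would need a constant extender, which is
      // exactly what isConstExtended() checks against getMinValue/getMaxValue.
      assert(minValue(F) == -128 && maxValue(F) == 127);
      printf("range without extender: [%d, %d]\n", minValue(F), maxValue(F));
      return 0;
    }
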
diff --git a/lib/Target/Hexagon/HexagonInstrInfo.h b/lib/Target/Hexagon/HexagonInstrInfo.h
index 4e36dfb..d2f059a 100644
--- a/lib/Target/Hexagon/HexagonInstrInfo.h
+++ b/lib/Target/Hexagon/HexagonInstrInfo.h
@@ -169,6 +169,7 @@ public:
bool isConditionalALU32 (const MachineInstr* MI) const;
bool isConditionalLoad (const MachineInstr* MI) const;
bool isConditionalStore(const MachineInstr* MI) const;
+ bool isNewValueInst(const MachineInstr* MI) const;
bool isDeallocRet(const MachineInstr *MI) const;
unsigned getInvertedPredicatedOpcode(const int Opc) const;
bool isExtendable(const MachineInstr* MI) const;
@@ -177,9 +178,18 @@ public:
bool isNewValueStore(const MachineInstr* MI) const;
bool isNewValueJump(const MachineInstr* MI) const;
bool isNewValueJumpCandidate(const MachineInstr *MI) const;
- unsigned getImmExtForm(const MachineInstr* MI) const;
- unsigned getNormalBranchForm(const MachineInstr* MI) const;
+
+ void immediateExtend(MachineInstr *MI) const;
+ bool isConstExtended(MachineInstr *MI) const;
+ unsigned getAddrMode(const MachineInstr* MI) const;
+ bool isOperandExtended(const MachineInstr *MI,
+ unsigned short OperandNum) const;
+ unsigned short getCExtOpNum(const MachineInstr *MI) const;
+ int getMinValue(const MachineInstr *MI) const;
+ int getMaxValue(const MachineInstr *MI) const;
+ bool NonExtEquivalentExists (const MachineInstr *MI) const;
+ short getNonExtOpcode(const MachineInstr *MI) const;
private:
int getMatchingCondBranchOpcode(int Opc, bool sense) const;
diff --git a/lib/Target/Hexagon/HexagonInstrInfo.td b/lib/Target/Hexagon/HexagonInstrInfo.td
index 082772a..d7bab20 100644
--- a/lib/Target/Hexagon/HexagonInstrInfo.td
+++ b/lib/Target/Hexagon/HexagonInstrInfo.td
@@ -251,28 +251,55 @@ multiclass TFR_base<string CextOp> {
}
}
+class T_TFR64_Pred<bit PredNot, bit isPredNew>
+ : ALU32_rr<(outs DoubleRegs:$dst),
+ (ins PredRegs:$src1, DoubleRegs:$src2),
+ !if(PredNot, "if (!$src1", "if ($src1")#
+ !if(isPredNew, ".new) ", ") ")#"$dst = $src2", []>
+{
+ bits<5> dst;
+ bits<2> src1;
+ bits<5> src2;
+
+ let IClass = 0b1111;
+ let Inst{27-24} = 0b1101;
+ let Inst{13} = isPredNew;
+ let Inst{7} = PredNot;
+ let Inst{4-0} = dst;
+ let Inst{6-5} = src1;
+ let Inst{20-17} = src2{4-1};
+ let Inst{16} = 0b1;
+ let Inst{12-9} = src2{4-1};
+ let Inst{8} = 0b0;
+}
+
multiclass TFR64_Pred<bit PredNot> {
let PredSense = !if(PredNot, "false", "true") in {
- def _c#NAME : ALU32_rr<(outs DoubleRegs:$dst),
- (ins PredRegs:$src1, DoubleRegs:$src2),
- !if(PredNot, "if (!$src1", "if ($src1")#") $dst = $src2",
- []>;
- // Predicate new
+ def _c#NAME : T_TFR64_Pred<PredNot, 0>;
+
let PNewValue = "new" in
- def _cdn#NAME : ALU32_rr<(outs DoubleRegs:$dst),
- (ins PredRegs:$src1, DoubleRegs:$src2),
- !if(PredNot, "if (!$src1", "if ($src1")#".new) $dst = $src2",
- []>;
+ def _cdn#NAME : T_TFR64_Pred<PredNot, 1>; // Predicate new
}
}
-let InputType = "reg", neverHasSideEffects = 1 in
-multiclass TFR64_base<string CextOp> {
- let CextOpcode = CextOp, BaseOpcode = CextOp in {
+let neverHasSideEffects = 1 in
+multiclass TFR64_base<string BaseName> {
+ let BaseOpcode = BaseName in {
let isPredicable = 1 in
- def NAME : ALU32_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1),
- "$dst = $src1",
- []>;
+ def NAME : ALU32Inst <(outs DoubleRegs:$dst),
+ (ins DoubleRegs:$src1),
+ "$dst = $src1" > {
+ bits<5> dst;
+ bits<5> src1;
+
+ let IClass = 0b1111;
+ let Inst{27-23} = 0b01010;
+ let Inst{4-0} = dst;
+ let Inst{20-17} = src1{4-1};
+ let Inst{16} = 0b1;
+ let Inst{12-9} = src1{4-1};
+ let Inst{8} = 0b0;
+ }
let isPredicated = 1 in {
defm Pt : TFR64_Pred<0>;
@@ -281,9 +308,8 @@ multiclass TFR64_base<string CextOp> {
}
}
-
multiclass TFRI_Pred<bit PredNot> {
- let PredSense = !if(PredNot, "false", "true") in {
+ let isMoveImm = 1, PredSense = !if(PredNot, "false", "true") in {
def _c#NAME : ALU32_ri<(outs IntRegs:$dst),
(ins PredRegs:$src1, s12Ext:$src2),
!if(PredNot, "if (!$src1", "if ($src1")#") $dst = #$src2",
@@ -301,8 +327,8 @@ multiclass TFRI_Pred<bit PredNot> {
let InputType = "imm", isExtendable = 1, isExtentSigned = 1 in
multiclass TFRI_base<string CextOp> {
let CextOpcode = CextOp, BaseOpcode = CextOp#I in {
- let opExtendable = 1, opExtentBits = 16, isMoveImm = 1, isPredicable = 1,
- isReMaterializable = 1 in
+ let isAsCheapAsAMove = 1 , opExtendable = 1, opExtentBits = 16,
+ isMoveImm = 1, isPredicable = 1, isReMaterializable = 1 in
def NAME : ALU32_ri<(outs IntRegs:$dst), (ins s16Ext:$src1),
"$dst = #$src1",
[(set (i32 IntRegs:$dst), s16ExtPred:$src1)]>;
@@ -317,7 +343,7 @@ multiclass TFRI_base<string CextOp> {
defm TFRI : TFRI_base<"TFR">, ImmRegRel, PredNewRel;
defm TFR : TFR_base<"TFR">, ImmRegRel, PredNewRel;
-defm TFR64 : TFR64_base<"TFR64">, ImmRegRel, PredNewRel;
+defm TFR64 : TFR64_base<"TFR64">, PredNewRel;
// Transfer control register.
let neverHasSideEffects = 1 in
@@ -2050,6 +2076,10 @@ def CONST32_Int_Real : LDInst2<(outs IntRegs:$dst), (ins i32imm:$global),
"$dst = CONST32(#$global)",
[(set (i32 IntRegs:$dst), imm:$global) ]>;
+// Map BlockAddress lowering to CONST32_Int_Real
+def : Pat<(HexagonCONST32_GP tblockaddress:$addr),
+ (CONST32_Int_Real tblockaddress:$addr)>;
+
let isReMaterializable = 1, isMoveImm = 1 in
def CONST32_Label : LDInst2<(outs IntRegs:$dst), (ins bblabel:$label),
"$dst = CONST32($label)",
@@ -2845,6 +2875,18 @@ def: Pat <(i64 (zextloadi8 (add (i32 IntRegs:$src1),
s11_0ExtPred:$offset)))>,
Requires<[NoV4T]>;
+// i1 -> i64
+def: Pat <(i64 (zextloadi1 ADDRriS11_0:$src1)),
+ (i64 (COMBINE_rr (TFRI 0), (LDriub ADDRriS11_0:$src1)))>,
+ Requires<[NoV4T]>;
+
+let AddedComplexity = 20 in
+def: Pat <(i64 (zextloadi1 (add (i32 IntRegs:$src1),
+ s11_0ExtPred:$offset))),
+ (i64 (COMBINE_rr (TFRI 0), (LDriub_indexed IntRegs:$src1,
+ s11_0ExtPred:$offset)))>,
+ Requires<[NoV4T]>;
+
// i16 -> i64
def: Pat <(i64 (zextloadi16 ADDRriS11_1:$src1)),
(i64 (COMBINE_rr (TFRI 0), (LDriuh ADDRriS11_1:$src1)))>,
@@ -3020,6 +3062,11 @@ def BR_JT : JRInst<(outs), (ins IntRegs:$src),
"jumpr $src",
[(HexagonBR_JT (i32 IntRegs:$src))]>;
+let isBranch=1, isIndirectBranch=1, isTerminator=1 in
+def BRIND : JRInst<(outs), (ins IntRegs:$src),
+ "jumpr $src",
+ [(brind (i32 IntRegs:$src))]>;
+
def HexagonWrapperJT: SDNode<"HexagonISD::WrapperJT", SDTIntUnaryOp>;
def : Pat<(HexagonWrapperJT tjumptable:$dst),
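
The new i1 -> i64 zero-extending load patterns above (and their V4 counterparts using COMBINE_Ir_V4 in the next file) select an unsigned byte load paired with a zero high word. A rough C++ model of the value that selected sequence computes, assuming the i1 is stored in memory as a 0/1 byte (this model is illustrative, not part of the patch):

    #include <cassert>
    #include <cstdint>

    // What (i64 (zextloadi1 addr)) -> COMBINE(TFRI 0, LDriub addr) computes:
    // load the byte that holds the i1, zero-extend it to 32 bits, and pair it
    // with a zero high word to form the 64-bit result.
    static uint64_t zextload_i1_to_i64(const uint8_t *p) {
      uint32_t lo = *p;                       // LDriub: unsigned byte load
      uint32_t hi = 0;                        // TFRI 0 / immediate zero
      return ((uint64_t)hi << 32) | lo;       // COMBINE: hi:lo register pair
    }

    int main() {
      uint8_t flag = 1;
      assert(zextload_i1_to_i64(&flag) == 1);
      flag = 0;
      assert(zextload_i1_to_i64(&flag) == 0);
      return 0;
    }
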
diff --git a/lib/Target/Hexagon/HexagonInstrInfoV4.td b/lib/Target/Hexagon/HexagonInstrInfoV4.td
index e1b2f88..1d0643d 100644
--- a/lib/Target/Hexagon/HexagonInstrInfoV4.td
+++ b/lib/Target/Hexagon/HexagonInstrInfoV4.td
@@ -940,6 +940,18 @@ def: Pat <(i64 (zextloadi8 (add (i32 IntRegs:$src1),
s11_0ExtPred:$offset)))>,
Requires<[HasV4T]>;
+// zext i1->i64
+def: Pat <(i64 (zextloadi1 ADDRriS11_0:$src1)),
+ (i64 (COMBINE_Ir_V4 0, (LDriub ADDRriS11_0:$src1)))>,
+ Requires<[HasV4T]>;
+
+let AddedComplexity = 20 in
+def: Pat <(i64 (zextloadi1 (add (i32 IntRegs:$src1),
+ s11_0ExtPred:$offset))),
+ (i64 (COMBINE_Ir_V4 0, (LDriub_indexed IntRegs:$src1,
+ s11_0ExtPred:$offset)))>,
+ Requires<[HasV4T]>;
+
// zext i16->i64
def: Pat <(i64 (zextloadi16 ADDRriS11_1:$src1)),
(i64 (COMBINE_Ir_V4 0, (LDriuh ADDRriS11_1:$src1)))>,
@@ -3790,6 +3802,11 @@ def TFRI_V4 : ALU32_ri<(outs IntRegs:$dst), (ins globaladdress:$src1),
[(set IntRegs:$dst, (HexagonCONST32 tglobaladdr:$src1))]>,
Requires<[HasV4T]>;
+// Transfer a block address into a register
+def : Pat<(HexagonCONST32_GP tblockaddress:$src1),
+ (TFRI_V4 tblockaddress:$src1)>,
+ Requires<[HasV4T]>;
+
let AddedComplexity=50, neverHasSideEffects = 1, isPredicated = 1 in
def TFRI_cPt_V4 : ALU32_ri<(outs IntRegs:$dst),
(ins PredRegs:$src1, globaladdress:$src2),
diff --git a/lib/Target/Hexagon/HexagonMachineScheduler.cpp b/lib/Target/Hexagon/HexagonMachineScheduler.cpp
index ced17b3..1388ad4 100644
--- a/lib/Target/Hexagon/HexagonMachineScheduler.cpp
+++ b/lib/Target/Hexagon/HexagonMachineScheduler.cpp
@@ -15,7 +15,8 @@
#define DEBUG_TYPE "misched"
#include "HexagonMachineScheduler.h"
-#include <queue>
+#include "llvm/CodeGen/MachineLoopInfo.h"
+#include "llvm/IR/Function.h"
using namespace llvm;
@@ -159,6 +160,9 @@ void VLIWMachineScheduler::schedule() {
SchedImpl->initialize(this);
// To view Height/Depth correctly, they should be accessed at least once.
+ //
+ // FIXME: SUnit::dumpAll always recomputes depth and height now. The max
+ // depth/height could be computed directly from the roots and leaves.
DEBUG(unsigned maxH = 0;
for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
if (SUnits[su].getHeight() > maxH)
diff --git a/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp b/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp
index aff6b86..866beb1 100644
--- a/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp
+++ b/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp
@@ -17,35 +17,36 @@
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "packets"
-#include "Hexagon.h"
-#include "HexagonMachineFunctionInfo.h"
-#include "HexagonRegisterInfo.h"
-#include "HexagonSubtarget.h"
-#include "HexagonTargetMachine.h"
-#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/DFAPacketizer.h"
-#include "llvm/CodeGen/LatencyPriorityQueue.h"
+#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/MachineDominators.h"
-#include "llvm/CodeGen/MachineFrameInfo.h"
-#include "llvm/CodeGen/MachineFunctionAnalysis.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CodeGen/ScheduleDAGInstrs.h"
-#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
+#include "llvm/CodeGen/LatencyPriorityQueue.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/MachineFunctionAnalysis.h"
+#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Support/MathExtras.h"
#include "llvm/MC/MCInstrItineraries.h"
-#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
+#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Support/MathExtras.h"
-#include "llvm/Target/TargetInstrInfo.h"
-#include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetRegisterInfo.h"
+#include "Hexagon.h"
+#include "HexagonTargetMachine.h"
+#include "HexagonRegisterInfo.h"
+#include "HexagonSubtarget.h"
+#include "HexagonMachineFunctionInfo.h"
+
#include <map>
using namespace llvm;
@@ -257,7 +258,7 @@ void HexagonPacketizerList::reserveResourcesForConstExt(MachineInstr* MI) {
bool HexagonPacketizerList::canReserveResourcesForConstExt(MachineInstr *MI) {
const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII;
- assert(QII->isExtended(MI) &&
+ assert((QII->isExtended(MI) || QII->isConstExtended(MI)) &&
"Should only be called for constant extended instructions");
MachineFunction *MF = MI->getParent()->getParent();
MachineInstr *PseudoMI = MF->CreateMachineInstr(QII->get(Hexagon::IMMEXT_i),
@@ -350,17 +351,6 @@ static bool IsControlFlow(MachineInstr* MI) {
return (MI->getDesc().isTerminator() || MI->getDesc().isCall());
}
-bool HexagonPacketizerList::isNewValueInst(MachineInstr* MI) {
- const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII;
- if (QII->isNewValueJump(MI))
- return true;
-
- if (QII->isNewValueStore(MI))
- return true;
-
- return false;
-}
-
// Function returns true if an instruction can be promoted to the new-value
// store. It will always return false for v2 and v3.
// It lists all the conditional and unconditional stores that can be promoted
@@ -2165,7 +2155,8 @@ static bool GetPredicateSense(MachineInstr* MI,
}
bool HexagonPacketizerList::isDotNewInst(MachineInstr* MI) {
- if (isNewValueInst(MI))
+ const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII;
+ if (QII->isNewValueInst(MI))
return true;
switch (MI->getOpcode()) {
@@ -2893,13 +2884,13 @@ bool HexagonPacketizerList::isLegalToPacketizeTogether(SUnit *SUI, SUnit *SUJ) {
// dealloc_return and memop always take SLOT0.
// Arch spec 3.4.4.2
if (QRI->Subtarget.hasV4TOps()) {
-
- if (MCIDI.mayStore() && MCIDJ.mayStore() && isNewValueInst(J)) {
+ if (MCIDI.mayStore() && MCIDJ.mayStore() &&
+ (QII->isNewValueInst(J) || QII->isMemOp(J) || QII->isMemOp(I))) {
Dependence = true;
return false;
}
- if ( (QII->isMemOp(J) && MCIDI.mayStore())
+ if ((QII->isMemOp(J) && MCIDI.mayStore())
|| (MCIDJ.mayStore() && QII->isMemOp(I))
|| (QII->isMemOp(J) && QII->isMemOp(I))) {
Dependence = true;
@@ -3196,7 +3187,7 @@ HexagonPacketizerList::addToPacket(MachineInstr *MI) {
MachineInstr *nvjMI = MII;
assert(ResourceTracker->canReserveResources(MI));
ResourceTracker->reserveResources(MI);
- if (QII->isExtended(MI) &&
+ if ((QII->isExtended(MI) || QII->isConstExtended(MI)) &&
!tryAllocateResourcesForConstExt(MI)) {
endPacket(MBB, MI);
ResourceTracker->reserveResources(MI);
@@ -3216,7 +3207,7 @@ HexagonPacketizerList::addToPacket(MachineInstr *MI) {
&& (!tryAllocateResourcesForConstExt(nvjMI)
|| !ResourceTracker->canReserveResources(nvjMI)))
|| // For non-extended instruction, no need to allocate extra 4 bytes.
- (!QII->isExtended(nvjMI) &&
+ (!QII->isExtended(nvjMI) &&
!ResourceTracker->canReserveResources(nvjMI)))
{
endPacket(MBB, MI);
@@ -3232,7 +3223,7 @@ HexagonPacketizerList::addToPacket(MachineInstr *MI) {
CurrentPacketMIs.push_back(MI);
CurrentPacketMIs.push_back(nvjMI);
} else {
- if ( QII->isExtended(MI)
+ if ( (QII->isExtended(MI) || QII->isConstExtended(MI))
&& ( !tryAllocateResourcesForConstExt(MI)
|| !ResourceTracker->canReserveResources(MI)))
{
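
The packetizer changes above treat constant-extended instructions like permanently extended ones when reserving packet resources: the immediate extender is an extra instruction word, so an extra pseudo (IMMEXT) slot is reserved and the packet is ended if that reservation fails. A toy model of the bookkeeping, with the class and method names invented for the illustration (the four-word packet limit is the real architectural bound):

    #include <cassert>

    // Toy stand-in for the DFA resource tracking: a packet holds at most four
    // 32-bit words, and a constant-extended instruction costs two of them
    // (the instruction itself plus its immediate extender).
    struct PacketWords {
      enum { MaxWords = 4 };
      unsigned Used = 0;
      bool tryAdd(bool ConstExtended) {
        unsigned Need = ConstExtended ? 2 : 1;
        if (Used + Need > MaxWords)
          return false;              // addToPacket would endPacket() here
        Used += Need;
        return true;
      }
    };

    int main() {
      PacketWords P;
      assert(P.tryAdd(false));       // ordinary instruction: 1 word
      assert(P.tryAdd(true));        // const-extended: instruction + extender
      assert(P.tryAdd(false));       // packet now holds 4 words
      assert(!P.tryAdd(false));      // no room; start a new packet
      return 0;
    }
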
diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h b/lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h
index 5f9718b..d4a93b5 100644
--- a/lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h
+++ b/lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h
@@ -154,6 +154,28 @@ namespace HexagonII {
// *** The code above must match HexagonInstrFormat*.td *** //
+ // Hexagon specific MO operand flag mask.
+ enum HexagonMOTargetFlagVal {
+ //===------------------------------------------------------------------===//
+ // Hexagon Specific MachineOperand flags.
+ MO_NO_FLAG,
+
+ HMOTF_ConstExtended = 1,
+
+ /// MO_PCREL - On a symbol operand, indicates a PC-relative relocation
+ /// Used for computing a global address for PIC compilations
+ MO_PCREL,
+
+ /// MO_GOT - Indicates a GOT-relative relocation
+ MO_GOT,
+
+ // Low or high part of a symbol.
+ MO_LO16, MO_HI16,
+
+ // Offset from the base of the SDA.
+ MO_GPREL
+ };
+
} // End namespace HexagonII.
} // End namespace llvm.
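
HMOTF_ConstExtended is used as a bit in a MachineOperand's target flags: immediateExtend() ORs it in via addTargetFlag(), and the isExtended()/isConstExtended() queries mask it back out. A minimal stand-alone illustration of that round-trip; FakeOperand is invented here to mirror just the two accessors being used:

    #include <cassert>

    enum : unsigned { HMOTF_ConstExtended = 1 };   // mirrors the value above

    // Invented stand-in for the two MachineOperand accessors used by
    // immediateExtend()/isConstExtended(): getTargetFlags() and addTargetFlag().
    struct FakeOperand {
      unsigned TargetFlags = 0;
      unsigned getTargetFlags() const { return TargetFlags; }
      void addTargetFlag(unsigned F) { TargetFlags |= F; }
    };

    int main() {
      FakeOperand MO;
      // Not yet marked: the bitwise test must come back false.
      assert(!(MO.getTargetFlags() & HMOTF_ConstExtended));
      // immediateExtend() marks the extendable operand like this...
      MO.addTargetFlag(HMOTF_ConstExtended);
      // ...and the queries recover it with a bitwise mask. A logical '&&'
      // against the non-zero constant would instead report "any flag set".
      assert(MO.getTargetFlags() & HMOTF_ConstExtended);
      return 0;
    }
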