author     Dan Gohman <gohman@apple.com>    2008-08-20 15:24:22 +0000
committer  Dan Gohman <gohman@apple.com>    2008-08-20 15:24:22 +0000
commit     67ca6be16ae434f3edab0b6927fa80d04ad0828e (patch)
tree       b83157cfd315814216d610b45230a55c9c071792 /lib
parent     c53ec498857aa3e6d4fc17bd67dd282dcfc821da (diff)
Tablegen generated code already tests the opcode value, so it's not
necessary to use dyn_cast in these predicates.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@55055 91177308-0d34-0410-b5e6-96231b3b80d8
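
For readers unfamiliar with LLVM's casting utilities, the rationale is: `dyn_cast<>` tests a node's dynamic type at runtime and returns null on mismatch, while `cast<>` asserts the expected type and converts unconditionally. Because the matcher code TableGen emits only calls a PatFrag predicate after it has already matched the node's opcode, the null check inside each predicate could never fail; `cast<>` states that invariant directly and drops the dead `return false` path. Below is a minimal, self-contained sketch of the distinction; the types and helper names are hypothetical stand-ins, not LLVM's actual API.

```cpp
// Sketch only: SDNode/LoadSDNode/ISD_* are simplified stand-ins for the
// corresponding LLVM classes, so the example compiles on its own.
#include <cassert>
#include <cstdio>

struct SDNode { int Opcode; };                 // dynamic type tag lives here
enum { ISD_LOAD = 1, ISD_STORE = 2 };
struct LoadSDNode : SDNode { bool Indexed; };

// dyn_cast-style: check the tag, yield null on mismatch.
static LoadSDNode *dynCastLoad(SDNode *N) {
  return N->Opcode == ISD_LOAD ? static_cast<LoadSDNode *>(N) : nullptr;
}

// cast-style: assert the tag, convert unconditionally.
static LoadSDNode *castLoad(SDNode *N) {
  assert(N->Opcode == ISD_LOAD && "castLoad on a non-load node!");
  return static_cast<LoadSDNode *>(N);
}

// Predicate body in the style used after this commit: the caller (the
// generated matcher) has already compared the opcode, so the unchecked
// cast cannot fail.
static bool isUnindexedLoad(SDNode *N) {
  LoadSDNode *LD = castLoad(N);
  return !LD->Indexed;
}

int main() {
  LoadSDNode L{{ISD_LOAD}, /*Indexed=*/false};  // C++17 aggregate init
  SDNode S{ISD_STORE};
  std::printf("dyn_cast on a store node: %p\n",
              static_cast<void *>(dynCastLoad(&S)));  // prints a null pointer
  std::printf("predicate on a load node: %d\n", isUnindexedLoad(&L));
  return 0;
}
```

A side benefit: in debug builds the assertion inside `cast<>` fires if a predicate is ever reached with an unexpected node, whereas the old `dyn_cast` form would silently return false.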
Diffstat (limited to 'lib')
-rw-r--r--  lib/Target/Alpha/AlphaInstrInfo.td  |   8
-rw-r--r--  lib/Target/TargetSelectionDAG.td    | 352
-rw-r--r--  lib/Target/X86/X86InstrInfo.td      |  34
-rw-r--r--  lib/Target/X86/X86InstrSSE.td       |  36
4 files changed, 183 insertions, 247 deletions
diff --git a/lib/Target/Alpha/AlphaInstrInfo.td b/lib/Target/Alpha/AlphaInstrInfo.td
index 5078893..2dc39eb 100644
--- a/lib/Target/Alpha/AlphaInstrInfo.td
+++ b/lib/Target/Alpha/AlphaInstrInfo.td
@@ -89,11 +89,9 @@ def immSExt16int : PatLeaf<(imm), [{ //(int)imm fits in a 16 bit sign extended
 }], SExt16>;
 
 def zappat : PatFrag<(ops node:$LHS), (and node:$LHS, imm:$L), [{
-  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
-    uint64_t build = get_zapImm(N->getOperand(0), (uint64_t)RHS->getValue());
-    return build != 0;
-  }
-  return false;
+  ConstantSDNode *RHS = cast<ConstantSDNode>(N->getOperand(1));
+  uint64_t build = get_zapImm(N->getOperand(0), (uint64_t)RHS->getValue());
+  return build != 0;
 }]>;
 
 def immFPZ : PatLeaf<(fpimm), [{ //the only fpconstant nodes are +/- 0.0
diff --git a/lib/Target/TargetSelectionDAG.td b/lib/Target/TargetSelectionDAG.td
index 5dba0bc..3322ba2 100644
--- a/lib/Target/TargetSelectionDAG.td
+++ b/lib/Target/TargetSelectionDAG.td
@@ -497,347 +497,291 @@ def ineg : PatFrag<(ops node:$in), (sub 0, node:$in)>;
 
 // load fragments.
 def load : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
-  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
-    return LD->getExtensionType() == ISD::NON_EXTLOAD &&
-           LD->getAddressingMode() == ISD::UNINDEXED;
-  return false;
+  LoadSDNode *LD = cast<LoadSDNode>(N);
+  return LD->getExtensionType() == ISD::NON_EXTLOAD &&
+         LD->getAddressingMode() == ISD::UNINDEXED;
 }]>;
 
 // extending load fragments.
 def extloadi1 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
-  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
-    return LD->getExtensionType() == ISD::EXTLOAD &&
-           LD->getAddressingMode() == ISD::UNINDEXED &&
-           LD->getMemoryVT() == MVT::i1;
-  return false;
+  LoadSDNode *LD = cast<LoadSDNode>(N);
+  return LD->getExtensionType() == ISD::EXTLOAD &&
+         LD->getAddressingMode() == ISD::UNINDEXED &&
+         LD->getMemoryVT() == MVT::i1;
 }]>;
 def extloadi8 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
-  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
-    return LD->getExtensionType() == ISD::EXTLOAD &&
-           LD->getAddressingMode() == ISD::UNINDEXED &&
-           LD->getMemoryVT() == MVT::i8;
-  return false;
+  LoadSDNode *LD = cast<LoadSDNode>(N);
+  return LD->getExtensionType() == ISD::EXTLOAD &&
+         LD->getAddressingMode() == ISD::UNINDEXED &&
+         LD->getMemoryVT() == MVT::i8;
 }]>;
 def extloadi16 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
-  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
-    return LD->getExtensionType() == ISD::EXTLOAD &&
-           LD->getAddressingMode() == ISD::UNINDEXED &&
-           LD->getMemoryVT() == MVT::i16;
-  return false;
+  LoadSDNode *LD = cast<LoadSDNode>(N);
+  return LD->getExtensionType() == ISD::EXTLOAD &&
+         LD->getAddressingMode() == ISD::UNINDEXED &&
+         LD->getMemoryVT() == MVT::i16;
 }]>;
 def extloadi32 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
-  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
-    return LD->getExtensionType() == ISD::EXTLOAD &&
-           LD->getAddressingMode() == ISD::UNINDEXED &&
-           LD->getMemoryVT() == MVT::i32;
-  return false;
+  LoadSDNode *LD = cast<LoadSDNode>(N);
+  return LD->getExtensionType() == ISD::EXTLOAD &&
+         LD->getAddressingMode() == ISD::UNINDEXED &&
+         LD->getMemoryVT() == MVT::i32;
 }]>;
 def extloadf32 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
-  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
-    return LD->getExtensionType() == ISD::EXTLOAD &&
-           LD->getAddressingMode() == ISD::UNINDEXED &&
-           LD->getMemoryVT() == MVT::f32;
-  return false;
+  LoadSDNode *LD = cast<LoadSDNode>(N);
+  return LD->getExtensionType() == ISD::EXTLOAD &&
+         LD->getAddressingMode() == ISD::UNINDEXED &&
+         LD->getMemoryVT() == MVT::f32;
 }]>;
 def extloadf64 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
-  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
-    return LD->getExtensionType() == ISD::EXTLOAD &&
-           LD->getAddressingMode() == ISD::UNINDEXED &&
-           LD->getMemoryVT() == MVT::f64;
-  return false;
+  LoadSDNode *LD = cast<LoadSDNode>(N);
+  return LD->getExtensionType() == ISD::EXTLOAD &&
+         LD->getAddressingMode() == ISD::UNINDEXED &&
+         LD->getMemoryVT() == MVT::f64;
 }]>;
 def sextloadi1 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
-  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
-    return LD->getExtensionType() == ISD::SEXTLOAD &&
-           LD->getAddressingMode() == ISD::UNINDEXED &&
-           LD->getMemoryVT() == MVT::i1;
-  return false;
+  LoadSDNode *LD = cast<LoadSDNode>(N);
+  return LD->getExtensionType() == ISD::SEXTLOAD &&
+         LD->getAddressingMode() == ISD::UNINDEXED &&
+         LD->getMemoryVT() == MVT::i1;
 }]>;
 def sextloadi8 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
-  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
-    return LD->getExtensionType() == ISD::SEXTLOAD &&
-           LD->getAddressingMode() == ISD::UNINDEXED &&
-           LD->getMemoryVT() == MVT::i8;
-  return false;
+  LoadSDNode *LD = cast<LoadSDNode>(N);
+  return LD->getExtensionType() == ISD::SEXTLOAD &&
+         LD->getAddressingMode() == ISD::UNINDEXED &&
+         LD->getMemoryVT() == MVT::i8;
 }]>;
 def sextloadi16 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
-  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
-    return LD->getExtensionType() == ISD::SEXTLOAD &&
-           LD->getAddressingMode() == ISD::UNINDEXED &&
-           LD->getMemoryVT() == MVT::i16;
-  return false;
+  LoadSDNode *LD = cast<LoadSDNode>(N);
+  return LD->getExtensionType() == ISD::SEXTLOAD &&
+         LD->getAddressingMode() == ISD::UNINDEXED &&
+         LD->getMemoryVT() == MVT::i16;
 }]>;
 def sextloadi32 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
-  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
-    return LD->getExtensionType() == ISD::SEXTLOAD &&
-           LD->getAddressingMode() == ISD::UNINDEXED &&
-           LD->getMemoryVT() == MVT::i32;
-  return false;
+  LoadSDNode *LD = cast<LoadSDNode>(N);
+  return LD->getExtensionType() == ISD::SEXTLOAD &&
+         LD->getAddressingMode() == ISD::UNINDEXED &&
+         LD->getMemoryVT() == MVT::i32;
 }]>;
 def zextloadi1 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
-  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
-    return LD->getExtensionType() == ISD::ZEXTLOAD &&
-           LD->getAddressingMode() == ISD::UNINDEXED &&
-           LD->getMemoryVT() == MVT::i1;
-  return false;
+  LoadSDNode *LD = cast<LoadSDNode>(N);
+  return LD->getExtensionType() == ISD::ZEXTLOAD &&
+         LD->getAddressingMode() == ISD::UNINDEXED &&
+         LD->getMemoryVT() == MVT::i1;
 }]>;
 def zextloadi8 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
-  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
-    return LD->getExtensionType() == ISD::ZEXTLOAD &&
-           LD->getAddressingMode() == ISD::UNINDEXED &&
-           LD->getMemoryVT() == MVT::i8;
-  return false;
+  LoadSDNode *LD = cast<LoadSDNode>(N);
+  return LD->getExtensionType() == ISD::ZEXTLOAD &&
+         LD->getAddressingMode() == ISD::UNINDEXED &&
+         LD->getMemoryVT() == MVT::i8;
 }]>;
 def zextloadi16 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
-  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
-    return LD->getExtensionType() == ISD::ZEXTLOAD &&
-           LD->getAddressingMode() == ISD::UNINDEXED &&
-           LD->getMemoryVT() == MVT::i16;
-  return false;
+  LoadSDNode *LD = cast<LoadSDNode>(N);
+  return LD->getExtensionType() == ISD::ZEXTLOAD &&
+         LD->getAddressingMode() == ISD::UNINDEXED &&
+         LD->getMemoryVT() == MVT::i16;
 }]>;
 def zextloadi32 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
-  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
-    return LD->getExtensionType() == ISD::ZEXTLOAD &&
-           LD->getAddressingMode() == ISD::UNINDEXED &&
-           LD->getMemoryVT() == MVT::i32;
-  return false;
+  LoadSDNode *LD = cast<LoadSDNode>(N);
+  return LD->getExtensionType() == ISD::ZEXTLOAD &&
+         LD->getAddressingMode() == ISD::UNINDEXED &&
+         LD->getMemoryVT() == MVT::i32;
 }]>;
 
 // store fragments.
 def store : PatFrag<(ops node:$val, node:$ptr),
                     (st node:$val, node:$ptr), [{
-  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
-    return !ST->isTruncatingStore() &&
-           ST->getAddressingMode() == ISD::UNINDEXED;
-  return false;
+  StoreSDNode *ST = cast<StoreSDNode>(N);
+  return !ST->isTruncatingStore() &&
+         ST->getAddressingMode() == ISD::UNINDEXED;
 }]>;
 
 // truncstore fragments.
 def truncstorei8 : PatFrag<(ops node:$val, node:$ptr),
                            (st node:$val, node:$ptr), [{
-  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
-    return ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i8 &&
-           ST->getAddressingMode() == ISD::UNINDEXED;
-  return false;
+  StoreSDNode *ST = cast<StoreSDNode>(N);
+  return ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i8 &&
+         ST->getAddressingMode() == ISD::UNINDEXED;
 }]>;
 def truncstorei16 : PatFrag<(ops node:$val, node:$ptr),
                             (st node:$val, node:$ptr), [{
-  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
-    return ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i16 &&
-           ST->getAddressingMode() == ISD::UNINDEXED;
-  return false;
+  StoreSDNode *ST = cast<StoreSDNode>(N);
+  return ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i16 &&
+         ST->getAddressingMode() == ISD::UNINDEXED;
 }]>;
 def truncstorei32 : PatFrag<(ops node:$val, node:$ptr),
                             (st node:$val, node:$ptr), [{
-  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
+  StoreSDNode *ST = cast<StoreSDNode>(N);
     return ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i32 &&
            ST->getAddressingMode() == ISD::UNINDEXED;
   return false;
 }]>;
 def truncstoref32 : PatFrag<(ops node:$val, node:$ptr),
                             (st node:$val, node:$ptr), [{
-  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
-    return ST->isTruncatingStore() && ST->getMemoryVT() == MVT::f32 &&
-           ST->getAddressingMode() == ISD::UNINDEXED;
-  return false;
+  StoreSDNode *ST = cast<StoreSDNode>(N);
+  return ST->isTruncatingStore() && ST->getMemoryVT() == MVT::f32 &&
+         ST->getAddressingMode() == ISD::UNINDEXED;
 }]>;
 def truncstoref64 : PatFrag<(ops node:$val, node:$ptr),
                             (st node:$val, node:$ptr), [{
-  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
-    return ST->isTruncatingStore() && ST->getMemoryVT() == MVT::f64 &&
-           ST->getAddressingMode() == ISD::UNINDEXED;
-  return false;
+  StoreSDNode *ST = cast<StoreSDNode>(N);
+  return ST->isTruncatingStore() && ST->getMemoryVT() == MVT::f64 &&
+         ST->getAddressingMode() == ISD::UNINDEXED;
 }]>;
 
 // indexed store fragments.
 def pre_store : PatFrag<(ops node:$val, node:$base, node:$offset),
                         (ist node:$val, node:$base, node:$offset), [{
-  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
-    ISD::MemIndexedMode AM = ST->getAddressingMode();
-    return (AM == ISD::PRE_INC || AM == ISD::PRE_DEC) &&
-           !ST->isTruncatingStore();
-  }
-  return false;
+  StoreSDNode *ST = cast<StoreSDNode>(N);
+  ISD::MemIndexedMode AM = ST->getAddressingMode();
+  return (AM == ISD::PRE_INC || AM == ISD::PRE_DEC) &&
+         !ST->isTruncatingStore();
 }]>;
 def pre_truncsti1 : PatFrag<(ops node:$val, node:$base, node:$offset),
                             (ist node:$val, node:$base, node:$offset), [{
-  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
-    ISD::MemIndexedMode AM = ST->getAddressingMode();
-    return (AM == ISD::PRE_INC || AM == ISD::PRE_DEC) &&
-           ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i1;
-  }
-  return false;
+  StoreSDNode *ST = cast<StoreSDNode>(N);
+  ISD::MemIndexedMode AM = ST->getAddressingMode();
+  return (AM == ISD::PRE_INC || AM == ISD::PRE_DEC) &&
+         ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i1;
 }]>;
 def pre_truncsti8 : PatFrag<(ops node:$val, node:$base, node:$offset),
                             (ist node:$val, node:$base, node:$offset), [{
-  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
-    ISD::MemIndexedMode AM = ST->getAddressingMode();
-    return (AM == ISD::PRE_INC || AM == ISD::PRE_DEC) &&
-           ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i8;
-  }
-  return false;
+  StoreSDNode *ST = cast<StoreSDNode>(N);
+  ISD::MemIndexedMode AM = ST->getAddressingMode();
+  return (AM == ISD::PRE_INC || AM == ISD::PRE_DEC) &&
+         ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i8;
 }]>;
 def pre_truncsti16 : PatFrag<(ops node:$val, node:$base, node:$offset),
                              (ist node:$val, node:$base, node:$offset), [{
-  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
-    ISD::MemIndexedMode AM = ST->getAddressingMode();
-    return (AM == ISD::PRE_INC || AM == ISD::PRE_DEC) &&
-           ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i16;
-  }
-  return false;
+  StoreSDNode *ST = cast<StoreSDNode>(N);
+  ISD::MemIndexedMode AM = ST->getAddressingMode();
+  return (AM == ISD::PRE_INC || AM == ISD::PRE_DEC) &&
+         ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i16;
 }]>;
 def pre_truncsti32 : PatFrag<(ops node:$val, node:$base, node:$offset),
                              (ist node:$val, node:$base, node:$offset), [{
-  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
-    ISD::MemIndexedMode AM = ST->getAddressingMode();
-    return (AM == ISD::PRE_INC || AM == ISD::PRE_DEC) &&
-           ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i32;
-  }
-  return false;
+  StoreSDNode *ST = cast<StoreSDNode>(N);
+  ISD::MemIndexedMode AM = ST->getAddressingMode();
+  return (AM == ISD::PRE_INC || AM == ISD::PRE_DEC) &&
+         ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i32;
 }]>;
 def pre_truncstf32 : PatFrag<(ops node:$val, node:$base, node:$offset),
                              (ist node:$val, node:$base, node:$offset), [{
-  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
-    ISD::MemIndexedMode AM = ST->getAddressingMode();
-    return (AM == ISD::PRE_INC || AM == ISD::PRE_DEC) &&
-           ST->isTruncatingStore() && ST->getMemoryVT() == MVT::f32;
-  }
-  return false;
+  StoreSDNode *ST = cast<StoreSDNode>(N);
+  ISD::MemIndexedMode AM = ST->getAddressingMode();
+  return (AM == ISD::PRE_INC || AM == ISD::PRE_DEC) &&
+         ST->isTruncatingStore() && ST->getMemoryVT() == MVT::f32;
 }]>;
 
 def post_store : PatFrag<(ops node:$val, node:$ptr, node:$offset),
                          (ist node:$val, node:$ptr, node:$offset), [{
-  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
-    ISD::MemIndexedMode AM = ST->getAddressingMode();
-    return !ST->isTruncatingStore() &&
-           (AM == ISD::POST_INC || AM == ISD::POST_DEC);
-  }
-  return false;
+  StoreSDNode *ST = cast<StoreSDNode>(N);
+  ISD::MemIndexedMode AM = ST->getAddressingMode();
+  return !ST->isTruncatingStore() &&
+         (AM == ISD::POST_INC || AM == ISD::POST_DEC);
 }]>;
 def post_truncsti1 : PatFrag<(ops node:$val, node:$base, node:$offset),
                              (ist node:$val, node:$base, node:$offset), [{
-  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
-    ISD::MemIndexedMode AM = ST->getAddressingMode();
-    return (AM == ISD::POST_INC || AM == ISD::POST_DEC) &&
-           ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i1;
-  }
-  return false;
+  StoreSDNode *ST = cast<StoreSDNode>(N);
+  ISD::MemIndexedMode AM = ST->getAddressingMode();
+  return (AM == ISD::POST_INC || AM == ISD::POST_DEC) &&
+         ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i1;
 }]>;
 def post_truncsti8 : PatFrag<(ops node:$val, node:$base, node:$offset),
                              (ist node:$val, node:$base, node:$offset), [{
-  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
-    ISD::MemIndexedMode AM = ST->getAddressingMode();
-    return (AM == ISD::POST_INC || AM == ISD::POST_DEC) &&
-           ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i8;
-  }
-  return false;
+  StoreSDNode *ST = cast<StoreSDNode>(N);
+  ISD::MemIndexedMode AM = ST->getAddressingMode();
+  return (AM == ISD::POST_INC || AM == ISD::POST_DEC) &&
+         ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i8;
 }]>;
 def post_truncsti16 : PatFrag<(ops node:$val, node:$base, node:$offset),
                               (ist node:$val, node:$base, node:$offset), [{
-  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
-    ISD::MemIndexedMode AM = ST->getAddressingMode();
-    return (AM == ISD::POST_INC || AM == ISD::POST_DEC) &&
-           ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i16;
-  }
-  return false;
+  StoreSDNode *ST = cast<StoreSDNode>(N);
+  ISD::MemIndexedMode AM = ST->getAddressingMode();
+  return (AM == ISD::POST_INC || AM == ISD::POST_DEC) &&
+         ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i16;
 }]>;
 def post_truncsti32 : PatFrag<(ops node:$val, node:$base, node:$offset),
                               (ist node:$val, node:$base, node:$offset), [{
-  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
-    ISD::MemIndexedMode AM = ST->getAddressingMode();
-    return (AM == ISD::POST_INC || AM == ISD::POST_DEC) &&
-           ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i32;
-  }
-  return false;
+  StoreSDNode *ST = cast<StoreSDNode>(N);
+  ISD::MemIndexedMode AM = ST->getAddressingMode();
+  return (AM == ISD::POST_INC || AM == ISD::POST_DEC) &&
+         ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i32;
 }]>;
 def post_truncstf32 : PatFrag<(ops node:$val, node:$base, node:$offset),
                               (ist node:$val, node:$base, node:$offset), [{
-  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
-    ISD::MemIndexedMode AM = ST->getAddressingMode();
-    return (AM == ISD::POST_INC || AM == ISD::POST_DEC) &&
-           ST->isTruncatingStore() && ST->getMemoryVT() == MVT::f32;
-  }
-  return false;
+  StoreSDNode *ST = cast<StoreSDNode>(N);
+  ISD::MemIndexedMode AM = ST->getAddressingMode();
+  return (AM == ISD::POST_INC || AM == ISD::POST_DEC) &&
+         ST->isTruncatingStore() && ST->getMemoryVT() == MVT::f32;
 }]>;
 
-//Atomic patterns
+// Atomic patterns
 def atomic_cmp_swap_8 : PatFrag<(ops node:$ptr, node:$cmp, node:$swp),
                                 (atomic_cmp_swap node:$ptr, node:$cmp, node:$swp), [{
-  if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
-    return V->getValueType(0) == MVT::i8;
-  return false;
+  AtomicSDNode* V = cast<AtomicSDNode>(N);
+  return V->getValueType(0) == MVT::i8;
 }]>;
 def atomic_cmp_swap_16 : PatFrag<(ops node:$ptr, node:$cmp, node:$swp),
                                  (atomic_cmp_swap node:$ptr, node:$cmp, node:$swp), [{
-  if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
-    return V->getValueType(0) == MVT::i16;
-  return false;
+  AtomicSDNode* V = cast<AtomicSDNode>(N);
+  return V->getValueType(0) == MVT::i16;
 }]>;
 def atomic_cmp_swap_32 : PatFrag<(ops node:$ptr, node:$cmp, node:$swp),
                                  (atomic_cmp_swap node:$ptr, node:$cmp, node:$swp), [{
-  if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
-    return V->getValueType(0) == MVT::i32;
-  return false;
+  AtomicSDNode* V = cast<AtomicSDNode>(N);
+  return V->getValueType(0) == MVT::i32;
 }]>;
 def atomic_cmp_swap_64 : PatFrag<(ops node:$ptr, node:$cmp, node:$swp),
                                  (atomic_cmp_swap node:$ptr, node:$cmp, node:$swp), [{
-  if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
-    return V->getValueType(0) == MVT::i64;
-  return false;
+  AtomicSDNode* V = cast<AtomicSDNode>(N);
+  return V->getValueType(0) == MVT::i64;
 }]>;
 def atomic_load_add_8 : PatFrag<(ops node:$ptr, node:$inc),
                                 (atomic_load_add node:$ptr, node:$inc), [{
-  if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
-    return V->getValueType(0) == MVT::i8;
-  return false;
+  AtomicSDNode* V = cast<AtomicSDNode>(N);
+  return V->getValueType(0) == MVT::i8;
 }]>;
 def atomic_load_add_16 : PatFrag<(ops node:$ptr, node:$inc),
                                  (atomic_load_add node:$ptr, node:$inc), [{
-  if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
-    return V->getValueType(0) == MVT::i16;
-  return false;
+  AtomicSDNode* V = cast<AtomicSDNode>(N);
+  return V->getValueType(0) == MVT::i16;
 }]>;
 def atomic_load_add_32 : PatFrag<(ops node:$ptr, node:$inc),
                                  (atomic_load_add node:$ptr, node:$inc), [{
-  if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
-    return V->getValueType(0) == MVT::i32;
-  return false;
+  AtomicSDNode* V = cast<AtomicSDNode>(N);
+  return V->getValueType(0) == MVT::i32;
 }]>;
 def atomic_load_add_64 : PatFrag<(ops node:$ptr, node:$inc),
                                  (atomic_load_add node:$ptr, node:$inc), [{
-  if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
-    return V->getValueType(0) == MVT::i64;
-  return false;
+  AtomicSDNode* V = cast<AtomicSDNode>(N);
+  return V->getValueType(0) == MVT::i64;
 }]>;
 def atomic_swap_8 : PatFrag<(ops node:$ptr, node:$inc),
                             (atomic_swap node:$ptr, node:$inc), [{
-  if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
-    return V->getValueType(0) == MVT::i8;
-  return false;
+  AtomicSDNode* V = cast<AtomicSDNode>(N);
+  return V->getValueType(0) == MVT::i8;
 }]>;
 def atomic_swap_16 : PatFrag<(ops node:$ptr, node:$inc),
                              (atomic_swap node:$ptr, node:$inc), [{
-  if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
-    return V->getValueType(0) == MVT::i16;
-  return false;
+  AtomicSDNode* V = cast<AtomicSDNode>(N);
+  return V->getValueType(0) == MVT::i16;
 }]>;
 def atomic_swap_32 : PatFrag<(ops node:$ptr, node:$inc),
                              (atomic_swap node:$ptr, node:$inc), [{
-  if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
-    return V->getValueType(0) == MVT::i32;
-  return false;
+  AtomicSDNode* V = cast<AtomicSDNode>(N);
+  return V->getValueType(0) == MVT::i32;
 }]>;
 def atomic_swap_64 : PatFrag<(ops node:$ptr, node:$inc),
                              (atomic_swap node:$ptr, node:$inc), [{
-  if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
-    return V->getValueType(0) == MVT::i64;
-  return false;
+  AtomicSDNode* V = cast<AtomicSDNode>(N);
+  return V->getValueType(0) == MVT::i64;
 }]>;
diff --git a/lib/Target/X86/X86InstrInfo.td b/lib/Target/X86/X86InstrInfo.td
index 37a5fed..abf95ee 100644
--- a/lib/Target/X86/X86InstrInfo.td
+++ b/lib/Target/X86/X86InstrInfo.td
@@ -232,28 +232,26 @@ def i32immSExt8 : PatLeaf<(i32 imm), [{
 
 // It's always safe to treat a anyext i16 load as a i32 load if the i16 is
 // known to be 32-bit aligned or better. Ditto for i8 to i16.
 def loadi16 : PatFrag<(ops node:$ptr), (i16 (ld node:$ptr)), [{
-  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
-    if (LD->getAddressingMode() != ISD::UNINDEXED)
-      return false;
-    ISD::LoadExtType ExtType = LD->getExtensionType();
-    if (ExtType == ISD::NON_EXTLOAD)
-      return true;
-    if (ExtType == ISD::EXTLOAD)
-      return LD->getAlignment() >= 2 && !LD->isVolatile();
-  }
+  LoadSDNode *LD = cast<LoadSDNode>(N);
+  if (LD->getAddressingMode() != ISD::UNINDEXED)
+    return false;
+  ISD::LoadExtType ExtType = LD->getExtensionType();
+  if (ExtType == ISD::NON_EXTLOAD)
+    return true;
+  if (ExtType == ISD::EXTLOAD)
+    return LD->getAlignment() >= 2 && !LD->isVolatile();
   return false;
 }]>;
 
 def loadi32 : PatFrag<(ops node:$ptr), (i32 (ld node:$ptr)), [{
-  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
-    if (LD->getAddressingMode() != ISD::UNINDEXED)
-      return false;
-    ISD::LoadExtType ExtType = LD->getExtensionType();
-    if (ExtType == ISD::NON_EXTLOAD)
-      return true;
-    if (ExtType == ISD::EXTLOAD)
-      return LD->getAlignment() >= 4 && !LD->isVolatile();
-  }
+  LoadSDNode *LD = cast<LoadSDNode>(N);
+  if (LD->getAddressingMode() != ISD::UNINDEXED)
+    return false;
+  ISD::LoadExtType ExtType = LD->getExtensionType();
+  if (ExtType == ISD::NON_EXTLOAD)
+    return true;
+  if (ExtType == ISD::EXTLOAD)
+    return LD->getAlignment() >= 4 && !LD->isVolatile();
   return false;
 }]>;
diff --git a/lib/Target/X86/X86InstrSSE.td b/lib/Target/X86/X86InstrSSE.td
index 963a605..bb3f227 100644
--- a/lib/Target/X86/X86InstrSSE.td
+++ b/lib/Target/X86/X86InstrSSE.td
@@ -99,20 +99,18 @@ def loadv2i64 : PatFrag<(ops node:$ptr), (v2i64 (load node:$ptr))>;
 
 // Like 'store', but always requires vector alignment.
 def alignedstore : PatFrag<(ops node:$val, node:$ptr),
                            (st node:$val, node:$ptr), [{
-  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
-    return !ST->isTruncatingStore() &&
-           ST->getAddressingMode() == ISD::UNINDEXED &&
-           ST->getAlignment() >= 16;
-  return false;
+  StoreSDNode *ST = cast<StoreSDNode>(N);
+  return !ST->isTruncatingStore() &&
+         ST->getAddressingMode() == ISD::UNINDEXED &&
+         ST->getAlignment() >= 16;
 }]>;
 
 // Like 'load', but always requires vector alignment.
 def alignedload : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
-  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
-    return LD->getExtensionType() == ISD::NON_EXTLOAD &&
-           LD->getAddressingMode() == ISD::UNINDEXED &&
-           LD->getAlignment() >= 16;
-  return false;
+  LoadSDNode *LD = cast<LoadSDNode>(N);
+  return LD->getExtensionType() == ISD::NON_EXTLOAD &&
+         LD->getAddressingMode() == ISD::UNINDEXED &&
+         LD->getAlignment() >= 16;
 }]>;
 
 def alignedloadfsf32 : PatFrag<(ops node:$ptr), (f32 (alignedload node:$ptr))>;
@@ -128,11 +126,10 @@ def alignedloadv2i64 : PatFrag<(ops node:$ptr), (v2i64 (alignedload node:$ptr))>
 // FIXME: Actually implement support for targets that don't require the
 // alignment. This probably wants a subtarget predicate.
 def memop : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
-  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
-    return LD->getExtensionType() == ISD::NON_EXTLOAD &&
-           LD->getAddressingMode() == ISD::UNINDEXED &&
-           LD->getAlignment() >= 16;
-  return false;
+  LoadSDNode *LD = cast<LoadSDNode>(N);
+  return LD->getExtensionType() == ISD::NON_EXTLOAD &&
+         LD->getAddressingMode() == ISD::UNINDEXED &&
+         LD->getAlignment() >= 16;
 }]>;
 
 def memopfsf32 : PatFrag<(ops node:$ptr), (f32 (memop node:$ptr))>;
@@ -147,11 +144,10 @@ def memopv16i8 : PatFrag<(ops node:$ptr), (v16i8 (memop node:$ptr))>;
 // 16-byte boundary.
 // FIXME: 8 byte alignment for mmx reads is not required
 def memop64 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
-  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
-    return LD->getExtensionType() == ISD::NON_EXTLOAD &&
-           LD->getAddressingMode() == ISD::UNINDEXED &&
-           LD->getAlignment() >= 8;
-  return false;
+  LoadSDNode *LD = cast<LoadSDNode>(N);
+  return LD->getExtensionType() == ISD::NON_EXTLOAD &&
+         LD->getAddressingMode() == ISD::UNINDEXED &&
+         LD->getAlignment() >= 8;
 }]>;
 
 def memopv8i8 : PatFrag<(ops node:$ptr), (v8i8 (memop64 node:$ptr))>;